| Unnamed: 0 (int64, 0 to 15.9k) | cleaned_code (string, lengths 67 to 124k, ⌀) | cleaned_prompt (string, lengths 168 to 30.3k, ⌀) |
|---|---|---|
5,400
|
<ASSISTANT_TASK:>
Python Code:
# Import relevant modules
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from NPTFit import psf_correction as pc # Module for determining the PSF correction
from __future__ import print_function
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812)
f_ary_1 = pc_inst.f_ary
df_rho_div_f_ary_1 = pc_inst.df_rho_div_f_ary
print('f_ary:', f_ary_1)
print('df_rho_div_f_ary:', df_rho_div_f_ary_1)
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='black', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.title(r'Gaussian PSF, $\sigma_\mathrm{PSF} = 0.1812$', y=1.04)
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.05)
f_ary_2 = pc_inst.f_ary
df_rho_div_f_ary_2 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.4)
f_ary_3 = pc_inst.f_ary
df_rho_div_f_ary_3 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='cornflowerblue',label='0.18', lw = 1.5)
plt.plot(f_ary_2,f_ary_2**2*df_rho_div_f_ary_2/(f_ary_2[1]-f_ary_2[0]),color='forestgreen',label='0.05', lw = 1.5)
plt.plot(f_ary_3,f_ary_3**2*df_rho_div_f_ary_3/(f_ary_3[1]-f_ary_3[0]),color='maroon',label='0.4', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='upper right', fancybox=True)
plt.title(r'Varying $\sigma_\mathrm{PSF}$', y=1.04)
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,num_f_bins=20)
f_ary_4 = pc_inst.f_ary
df_rho_div_f_ary_4 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,n_psf=5000,n_pts_per_psf=100)
f_ary_5 = pc_inst.f_ary
df_rho_div_f_ary_5 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,f_trunc=0.1)
f_ary_6 = pc_inst.f_ary
df_rho_div_f_ary_6 = pc_inst.df_rho_div_f_ary
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812,nside=64)
f_ary_7 = pc_inst.f_ary
df_rho_div_f_ary_7 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_1,f_ary_1**2*df_rho_div_f_ary_1/(f_ary_1[1]-f_ary_1[0]),color='black',label=r'Default', lw=2.2)
plt.plot(f_ary_4,f_ary_4**2*df_rho_div_f_ary_4/(f_ary_4[1]-f_ary_4[0]),color='forestgreen',label=r'more f\_bins', lw = 1.5)
plt.plot(f_ary_5,f_ary_5**2*df_rho_div_f_ary_5/(f_ary_5[1]-f_ary_5[0]),color='cornflowerblue',label=r'fewer points', lw = 1.5)
plt.plot(f_ary_6,f_ary_6**2*df_rho_div_f_ary_6/(f_ary_6[1]-f_ary_6[0]),color='salmon',label=r'larger f\_trunc', lw = 1.5)
plt.plot(f_ary_7,f_ary_7**2*df_rho_div_f_ary_7/(f_ary_7[1]-f_ary_7[0]),color='orchid',label=r'lower nside', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)
# Fermi-LAT PSF at 2 GeV
# Calculate the appropriate Gaussian approximation to the PSF for 2 GeV
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.2354)
f_ary_8 = pc_inst.f_ary
df_rho_div_f_ary_8 = pc_inst.df_rho_div_f_ary
# Define parameters that specify the Fermi-LAT PSF at 2 GeV
fcore = 0.748988248179
score = 0.428653790656
gcore = 7.82363229341
stail = 0.715962650769
gtail = 3.61883748683
spe = 0.00456544262478
# Define the full PSF in terms of two King functions
def king_fn(x, sigma, gamma):
return 1./(2.*np.pi*sigma**2.)*(1.-1./gamma)*(1.+(x**2./(2.*gamma*sigma**2.)))**(-gamma)
def Fermi_PSF(r):
return fcore*king_fn(r/spe,score,gcore) + (1-fcore)*king_fn(r/spe,stail,gtail)
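# Optional sanity check (a sketch, assumes scipy is available): each King function is
# normalised so that the integral of 2*pi*x*king_fn(x, sigma, gamma) over x equals 1,
# so the fcore-weighted mixture should also integrate to approximately 1.
from scipy.integrate import quad
psf_norm, _ = quad(lambda x: 2.*np.pi*x*(fcore*king_fn(x, score, gcore)
                                         + (1.-fcore)*king_fn(x, stail, gtail)), 0., np.inf)
print('King-function mixture normalisation (should be ~1):', psf_norm)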
# Modify the relevant parameters in pc_inst and then make or load the PSF
pc_inst = pc.PSFCorrection(delay_compute=True)
pc_inst.psf_r_func = lambda r: Fermi_PSF(r)
pc_inst.sample_psf_max = 10.*spe*(score+stail)/2.
pc_inst.psf_samples = 10000
pc_inst.psf_tag = 'Fermi_PSF_2GeV'
pc_inst.make_or_load_psf_corr()
# Extract f_ary and df_rho_div_f_ary as usual
f_ary_9 = pc_inst.f_ary
df_rho_div_f_ary_9 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_8,f_ary_8**2*df_rho_div_f_ary_8/(f_ary_8[1]-f_ary_8[0]),color='maroon',label='Gauss PSF', lw = 1.5)
plt.plot(f_ary_9,f_ary_9**2*df_rho_div_f_ary_9/(f_ary_9[1]-f_ary_9[0]),color='forestgreen',label='Fermi PSF', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='upper right', fancybox=True)
# Fermi-LAT PSF at 20 GeV
# Calculate the appropriate Gaussian approximation to the PSF for 20 GeV
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.05529)
f_ary_10 = pc_inst.f_ary
df_rho_div_f_ary_10 = pc_inst.df_rho_div_f_ary
# Define parameters that specify the Fermi-LAT PSF at 20 GeV
fcore = 0.834725201378
score = 0.498192326976
gcore = 6.32075520959
stail = 1.06648424558
gtail = 4.49677834267
spe = 0.000943339426754
# Define the full PSF in terms of two King functions
def king_fn(x, sigma, gamma):
return 1./(2.*np.pi*sigma**2.)*(1.-1./gamma)*(1.+(x**2./(2.*gamma*sigma**2.)))**(-gamma)
def Fermi_PSF(r):
return fcore*king_fn(r/spe,score,gcore) + (1-fcore)*king_fn(r/spe,stail,gtail)
# Modify the relevant parameters in pc_inst and then make or load the PSF
pc_inst = pc.PSFCorrection(delay_compute=True)
pc_inst.psf_r_func = lambda r: Fermi_PSF(r)
pc_inst.sample_psf_max = 10.*spe*(score+stail)/2.
pc_inst.psf_samples = 10000
pc_inst.psf_tag = 'Fermi_PSF_20GeV'
pc_inst.make_or_load_psf_corr()
# Extract f_ary and df_rho_div_f_ary as usual
f_ary_11 = pc_inst.f_ary
df_rho_div_f_ary_11 = pc_inst.df_rho_div_f_ary
plt.plot(f_ary_10,f_ary_10**2*df_rho_div_f_ary_10/(f_ary_10[1]-f_ary_10[0]),color='maroon',label='Gauss PSF', lw = 1.5)
plt.plot(f_ary_11,f_ary_11**2*df_rho_div_f_ary_11/(f_ary_11[1]-f_ary_11[0]),color='forestgreen',label='Fermi PSF', lw = 1.5)
plt.xlabel('$f$')
plt.ylabel('$f \\times \\rho(f)$')
plt.legend(loc='upper left', fancybox=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: Example 2
Step3: Example 3
Step4: Example 4
|
5,401
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
tf.enable_eager_execution()
print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))
print(tf.encode_base64("hello world"))
# Operator overloading is also supported
print(tf.square(2) + tf.square(3))
x = tf.matmul([[1]], [[2, 3]])
print(x.shape)
print(x.dtype)
import numpy as np
ndarray = np.ones([3, 3])
print("TensorFlow operations convert numpy arrays to Tensors automatically")
tensor = tf.multiply(ndarray, 42)
print(tensor)
print("And NumPy operations convert Tensors to numpy arrays automatically")
print(np.add(tensor, 1))
print("The .numpy() method explicitly converts a Tensor to a numpy array")
print(tensor.numpy())
x = tf.random_uniform([3, 3])
print("Is there a GPU available: "),
print(tf.test.is_gpu_available())
print("Is the Tensor on GPU #0: "),
print(x.device.endswith('GPU:0'))
def time_matmul(x):
%timeit tf.matmul(x, x)
# Force execution on CPU
print("On CPU:")
with tf.device("CPU:0"):
x = tf.random_uniform([1000, 1000])
assert x.device.endswith("CPU:0")
time_matmul(x)
# Force execution on GPU #0 if available
if tf.test.is_gpu_available():
with tf.device("GPU:0"): # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc.
x = tf.random_uniform([1000, 1000])
assert x.device.endswith("GPU:0")
time_matmul(x)
ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])
# Create a CSV file
import tempfile
_, filename = tempfile.mkstemp()
with open(filename, 'w') as f:
f.write("""Line 1
Line 2
Line 3
""")
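# Quick check (a sketch): read the temporary file back with plain Python I/O to
# confirm the three lines were written as expected.
with open(filename) as f:
    print(f.read())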
ds_file = tf.data.TextLineDataset(filename)
ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2)
ds_file = ds_file.batch(2)
print('Elements of ds_tensors:')
for x in ds_tensors:
print(x)
print('\nElements in ds_file:')
for x in ds_file:
print(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Eager execution basics
Step2: Tensors
Step3: Each Tensor has a shape and a datatype
Step4: The most obvious differences between NumPy arrays and TensorFlow Tensors are
Step5: GPU acceleration
Step6: Device Names
Step8: Datasets
Step9: Apply transformations
Step10: Iterate
|
5,402
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import datetime as dt # date tools, used to note current date
# these are new
import os # operating system tools (check files)
import requests, io # internet and input tools
import zipfile as zf # zip file tools
import shutil # file management tools
# get "response" from url
url = 'http://files.grouplens.org/datasets/movielens/ml-latest-small.zip'
r = requests.get(url)
# describe response
print('Response status code:', r.status_code)
print('Response type:', type(r))
print('Response .content:', type(r.content))
print('Response headers:\n', r.headers, sep='')
# convert bytes to zip file
mlz = zf.ZipFile(io.BytesIO(r.content))
print('Type of zipfile object:', type(mlz))
# what's in the zip file?
mlz.namelist()
mlz.open('ml-latest-small/links.csv')
pd.read_csv(mlz.open('ml-latest-small/links.csv'))
# extract and read csv's
movies = pd.read_csv(mlz.open(mlz.namelist()[2]))
ratings = pd.read_csv(mlz.open(mlz.namelist()[3]))
# what do we have?
for df in [movies, ratings]:
print('Type:', type(df))
print('Dimensions:', df.shape, '\n')
print('Variables:', df.columns.tolist(), '\n')
print('First few rows', df.head(3), '\n')
# writing csv (specify different location)
with open('test_01.csv', 'wb') as out_file:
shutil.copyfileobj(mlz.open(mlz.namelist()[2]), out_file)
# experiment via http://stackoverflow.com/a/18043472/804513
with open('test.zip', 'wb') as out_file:
shutil.copyfileobj(io.BytesIO(r.content), out_file)
ratings.head(3)
movies.head(3)
combo = pd.merge(ratings, movies, # left and right df's
how='left', # add to left
on='movieId' # link with this variable/column
)
print('Dimensions of ratings:', ratings.shape)
print('Dimensions of movies:', movies.shape)
print('Dimensions of new df:', combo.shape)
combo.head(20)
combo_1 = ratings.merge(movies, how='left', on='movieId')
combo_1.head()
combo_2 = ratings.merge(movies, how='inner', on='movieId')
combo_2.shape
combo_3 = movies.merge(ratings, how='right', on='movieId')
combo_3.shape
# save as csv file for future use
combo.to_csv('mlcombined.csv')
count_2 = movies['movieId'].isin(ratings['movieId'])
count_2.sum()
print('Current directory:\n', os.getcwd(), sep='')
print('List of files:', os.listdir(), sep='\n')
combo['rating'].mean()
fig, ax = plt.subplots()
bins = [bin/100 for bin in list(range(25, 575, 50))]
print(bins)
combo['rating'].plot(kind='hist', ax=ax, bins=bins, color='blue', alpha=0.5)
ax.set_xlim(0,5.5)
ax.set_ylabel('Number')
ax.set_xlabel('Rating')
plt.show()
from plotly.offline import iplot # plotting functions
import plotly.graph_objs as go # ditto
import plotly
plotly.offline.init_notebook_mode(connected=True)
trace = go.Histogram(
x=combo['rating'],
histnorm='count',
name='control',
autobinx=False,
xbins=dict(
start=.5,
end=5.0,
size=0.5
),
marker=dict(
color='Blue',
),
opacity=0.75
)
layout = go.Layout(
title='Distribution of ratings',
xaxis=dict(
title='Rating value'
),
yaxis=dict(
title='Count'
),
bargap=0.01,
bargroupgap=0.1
)
iplot(go.Figure(data=[trace], layout=layout))
combo[combo['movieId']==31]['rating'].mean()
ave_mov = combo['rating'].groupby(combo['movieId']).mean()
ave_mov = ave_mov.reset_index()
ave_mov = ave_mov.rename(columns={"rating": "average rating"})
combo2 = combo.merge(ave_mov, how='left', on='movieId')
combo2.shape
combo2.head(3)
combo['ave'] = combo['rating'].groupby(combo['movieId']).transform('mean')
combo.head()
combo2[combo['movieId']==1129]
combo['count'] = combo['rating'].groupby(combo['movieId']).transform('count')
combo.head()
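# Alternative sketch: per-movie rating counts can also be obtained directly with
# value_counts(); the numbers should agree with the groupby/transform above.
rating_counts = combo['movieId'].value_counts()
rating_counts.head()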
url1 = 'http://esa.un.org/unpd/wpp/DVD/Files/'
url2 = '1_Indicators%20(Standard)/EXCEL_FILES/1_Population/'
url3 = 'WPP2015_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.XLS'
url = url1 + url2 + url3
cols = [2, 5] + list(range(6,28))
est = pd.read_excel(url, sheetname=0, skiprows=16, parse_cols=cols, na_values=['…'])
prj = pd.read_excel(url, sheetname=1, skiprows=16, parse_cols=cols, na_values=['…'])
print('Dimensions and dtypes of estimates: ', est.shape, '\n', est.dtypes.head(), sep='')
print('\nDimensions and dtypes of projections: ', prj.shape, '\n', prj.dtypes.head(), sep='')
est.to_csv('un_pop_est.csv')
prj.to_csv('un_pop_proj.csv')
list(est)[15:]
list(prj)[15:]
est.head()
prj.head()
def cleanpop(df, countries, years):
"""Take df as input and select the given countries and years."""
# rename first two columns
names = list(df)
df = df.rename(columns={names[0]: 'Country', names[1]: 'Year'})
# select countries and years
newdf = df[df['Country'].isin(countries) & df['Year'].isin(years)]
return newdf
countries = ['Japan']
past = [1950, 2000]
future = [2050, 2100]
e = cleanpop(est, countries, past)
p = cleanpop(prj, countries, future)
# make copies for later use
ealt = e.copy()
palt = p.copy()
# fix top-coding in estimates
e['80-84'] = e['80-84'].fillna(0.0) + e['80+'].fillna(0.0)
e = e.drop(['80+'], axis=1)
# check dimensions again
print('Dimensions of cleaned estimates: ', e.shape)
print('Dimensions of cleaned projections: ', p.shape)
# check to see if we have the same variables
list(e) == list(p)
ealt.head()
e.head()
pop = pd.concat([e, p], axis=0).fillna(0.0)
pop
popalt = pd.concat([ealt, palt], axis=0)
popalt
pop = pop.drop('Country', axis=1)
popi = pop.set_index('Year')
popi
popi.columns.name = 'Age'
popt = popi.T
popt.head()
ax = popt.plot(kind='bar', color='blue', alpha=0.5,
subplots=True,
sharey=True,
figsize=(8,12))
popi
popi.stack().unstack(level='Year')
list(range(1950, 2016, 5))
countries = ['United States of America', 'Japan']
past = list(range(1950, 2016, 5))
future = list(range(2015, 2101, 5))
e_US_J = cleanpop(est, countries, past)
p_US_J = cleanpop(prj, countries, future)
# fix top-coding in estimates
e_US_J['80-84'] = e_US_J['80-84'].fillna(0.0) + e_US_J['80+'].fillna(0.0)
e_US_J = e_US_J.drop(['80+'], axis=1)
e_US_J.head()
p_US_J[p_US_J['Country']=='United States of America'].head()
pop_US_J = pd.concat([e_US_J, p_US_J], axis=0)#.fillna(0.0)
pop_US_J.shape
pop_US_J
pop_i = pop_US_J.set_index(['Country', 'Year'])
pop_i.index
pop_i.columns.name = 'Age'
pop_st = pop_i.stack()
pop_st.head()
fig, ax = plt.subplots(2, 1, figsize=(12, 8))
pop_st.reorder_levels([2, 0, 1])['5-9']['United States of America'].plot(ax=ax[0], kind='line')
pop_st.reorder_levels([2, 0, 1])['5-9']['Japan'].plot(ax=ax[1], kind='line')
plt.show()
pop_st.head()
pop_st.loc[('Japan', 2100, slice(None))].plot(kind='bar')
plt.show()
pop_st.ix[('Japan', slice(None), '0-4')]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id=movielens></a>
Step2: Exercise. Something to do together. Suppose we wanted to save the files on our computer. How would we do it? Would we prefer individual csv's or a single zip?
Step3: <a id=merge-movies></a>
Step4: Merging
Step5: Exercise. Some of these we know how to do, the others we don't. For the ones we know, what is the answer? For the others, what (in loose terms) do we need to be able to do to come up with an answer?
Step6: <a id=population></a>
Step7: Comment. Note that they have different numbers of columns. Let's see where that comes from.
Step9: Clean data
Step10: Merge estimates and projections
Step11: Exercise. What happens if we try to merge the original dataframes, including the one with the extra 80+ column? Run the code below and comment on what you get.
Step12: Shape data
Step13: Exercise. Use set_index, stack, and unstack to shape the dataframe popi into popt.
|
5,403
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
c0=sns.color_palette()[0]
c1=sns.color_palette()[1]
c2=sns.color_palette()[2]
from matplotlib.colors import ListedColormap
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
def points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=True, colorscale=cmap_light, cdiscrete=cmap_bold, alpha=0.1, psize=10, zfunc=False, predicted=False):
h = .02
X=np.concatenate((Xtr, Xte))
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
#plt.figure(figsize=(10,6))
if zfunc:
p0 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0]
p1 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z=zfunc(p0, p1)
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
ZZ = Z.reshape(xx.shape)
if mesh:
plt.pcolormesh(xx, yy, ZZ, cmap=cmap_light, alpha=alpha, axes=ax)
if predicted:
showtr = clf.predict(Xtr)
showte = clf.predict(Xte)
else:
showtr = ytr
showte = yte
ax.scatter(Xtr[:, 0], Xtr[:, 1], c=showtr-1, cmap=cmap_bold, s=psize, alpha=alpha,edgecolor="k")
# and testing points
ax.scatter(Xte[:, 0], Xte[:, 1], c=showte-1, cmap=cmap_bold, alpha=alpha, marker="s", s=psize+10)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
return ax,xx,yy
def points_plot_prob(ax, Xtr, Xte, ytr, yte, clf, colorscale=cmap_light, cdiscrete=cmap_bold, ccolor=cm, psize=10, alpha=0.1):
ax,xx,yy = points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=False, colorscale=colorscale, cdiscrete=cdiscrete, psize=psize, alpha=alpha, predicted=True)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=ccolor, alpha=.2, axes=ax)
cs2 = plt.contour(xx, yy, Z, cmap=ccolor, alpha=.6, axes=ax)
plt.clabel(cs2, fmt = '%2.1f', colors = 'k', fontsize=14, axes=ax)
return ax
dflog=pd.read_csv("data/01_heights_weights_genders.csv")
dflog.head()
#your turn
sns.regplot(x='Weight', y='Height', data=dflog[dflog['Gender']=='Male'], color='blue')
sns.regplot(x='Weight', y='Height', data=dflog[dflog['Gender']=='Female'], color='green')
from sklearn.cross_validation import KFold
from sklearn.metrics import accuracy_score
def cv_score(clf, x, y, score_func=accuracy_score):
result = 0
nfold = 5
for train, test in KFold(y.size, nfold): # split data into train/test groups, 5 times
clf.fit(x[train], y[train]) # fit
result += score_func(clf.predict(x[test]), y[test]) # evaluate score function on held-out data
return result / nfold # average
from sklearn.cross_validation import train_test_split
Xlr, Xtestlr, ylr, ytestlr = train_test_split(dflog[['Height','Weight']].values,
(dflog.Gender=="Male").values,random_state=5)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(Xlr,ylr)
print(accuracy_score(clf.predict(Xtestlr),ytestlr))
clf = LogisticRegression()
score = cv_score(clf, Xlr, ylr)
print(score)
#the grid of parameters to search over
Cs = [0.001, 0.1, 1, 10, 100]
#your turn
from sklearn.model_selection import cross_val_score
for C in Cs:
clf = LogisticRegression(C=C)
print(C)
print(sum(cross_val_score(clf, Xlr, ylr, cv=5, scoring='accuracy'))/5)
# So the best one is 0.001, but the differences are hardly there.
#your turn
clf = LogisticRegression(C=0.001)
clf.fit(Xlr,ylr)
print(accuracy_score(clf.predict(Xtestlr),ytestlr))
# The differences are marginal in this case; I presume the default is the default for a reason,
# namely that it usually performs well. With other cases/algorithms the parameter may have a bigger influence.
# To get the best performing model, tuning hyperparamters is necessary.
#your turn
from sklearn.model_selection import GridSearchCV
dict_Cs = {'C': [0.001, 0.1, 1, 10, 100]}
clf = GridSearchCV(LogisticRegression(), dict_Cs, 'accuracy', cv=5)
clf.fit(Xlr,ylr)
print(clf.best_params_)
print(clf.best_score_)
print(accuracy_score(clf.predict(Xtestlr),ytestlr))
# It gives the same result, when the same number of folds is used.
def cv_optimize(clf, parameters, Xtrain, ytrain, n_folds=5):
gs = GridSearchCV(clf, param_grid=parameters, cv=n_folds)
gs.fit(Xtrain, ytrain)
print("BEST PARAMS", gs.best_params_)
best = gs.best_estimator_
return best
from sklearn.cross_validation import train_test_split
def do_classify(clf, parameters, indf, featurenames, targetname, target1val, standardize=False, train_size=0.8):
subdf=indf[featurenames]
if standardize:
subdfstd=(subdf - subdf.mean())/subdf.std()
else:
subdfstd=subdf
X=subdfstd.values
y=(indf[targetname].values==target1val)*1
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=train_size)
clf = cv_optimize(clf, parameters, Xtrain, ytrain)
clf=clf.fit(Xtrain, ytrain)
training_accuracy = clf.score(Xtrain, ytrain)
test_accuracy = clf.score(Xtest, ytest)
print("Accuracy on training data: %0.2f" % (training_accuracy))
print("Accuracy on test data: %0.2f" % (test_accuracy))
return clf, Xtrain, ytrain, Xtest, ytest
h = lambda z: 1./(1+np.exp(-z))
zs=np.arange(-5,5,0.1)
plt.plot(zs, h(zs), alpha=0.5);
dflog.head()
clf_l, Xtrain_l, ytrain_l, Xtest_l, ytest_l = do_classify(LogisticRegression(),
{"C": [0.01, 0.1, 1, 10, 100]},
dflog, ['Weight', 'Height'], 'Gender','Male')
plt.figure()
ax=plt.gca()
points_plot(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, alpha=0.2);
clf_l.predict_proba(Xtest_l)
plt.figure()
ax=plt.gca()
points_plot_prob(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, psize=20, alpha=0.1);
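# Optional check (a sketch): for a fitted LogisticRegression, the positive-class
# predict_proba is just h(X.dot(w) + b) with learned weights w and intercept b,
# where h is the sigmoid defined above.
w = clf_l.coef_.ravel()
b = clf_l.intercept_[0]
manual_probs = h(Xtest_l.dot(w) + b)
print(np.allclose(manual_probs, clf_l.predict_proba(Xtest_l)[:, 1]))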
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using sklearn
Step2: Remember that the form of data we will use always is
Step3: In the Linear Regression Mini Project, the last (extra credit) exercise was to write a K-Fold cross-validation. Feel free to use that code below, or just use the cv_score function we've provided.
Step4: First, we try a basic Logistic Regression
Step5: While this looks like a pretty great model, we would like to ensure two things
Step6: Your turn
Step7: Things to think about
Step8: Use scikit-learn's GridSearchCV tool
Step9: Recap of the math behind Logistic Regression (optional, feel free to skip)
Step10: We then use this best classifier to fit the entire training set. This is done inside the do_classify function, which takes a dataframe indf as input. It takes the columns in the list featurenames as the features used to train the classifier. The column targetname sets the target. The classification is done by setting those samples for which targetname has value target1val to the value 1, and all others to 0. We split the dataframe into 80% training and 20% testing by default, standardizing the dataset if desired. (Standardizing a data set involves scaling the data so that it has 0 mean and is described in units of its standard deviation.) We then train the model on the training set using cross-validation. Having obtained the best classifier using cv_optimize, we retrain on the entire training set and calculate the training and testing accuracy, which we print. We return the split data and the trained classifier.
Step11: Logistic Regression
Step12: So we then come up with our rule by identifying
Step13: In the figure here showing the results of the logistic regression, we plot the actual labels of both the training (circles) and test (squares) samples. The 0's (females) are plotted in red, the 1's (males) in blue. We also show the classification boundary, a line (to the resolution of a grid square). Every sample on the red background side of the line will be classified female, and every sample on the blue side, male. Notice that most of the samples are classified well, but there are misclassified people on both sides, as evidenced by leakage of dots or squares of one color onto the side of the other color. Both test and training accuracy are about 92%.
Step14: Discriminative classifier
|
5,404
|
<ASSISTANT_TASK:>
Python Code:
with open('/resources/data/Example2.txt','w') as writefile:
writefile.write("This is line A")
with open('/resources/data/Example2.txt','r') as testwritefile:
print(testwritefile.read())
with open('/resources/data/Example2.txt','w') as writefile:
writefile.write("This is line A\n")
writefile.write("This is line B\n")
with open('/resources/data/Example2.txt','r') as testwritefile:
print(testwritefile.read())
with open('/resources/data/Example2.txt','a') as testwritefile:
testwritefile.write("This is line C\n")
with open('/resources/data/Example2.txt','r') as testwritefile:
print(testwritefile.read())
Lines=["This is line A\n","This is line B\n","This is line C\n"]
Lines
with open('Example2.txt','w') as writefile:
for line in Lines:
print(line)
writefile.write(line)
with open('Example2.txt','r') as testwritefile:
print(testwritefile.read())
with open('Example2.txt','a') as testwritefile:
testwritefile.write("This is line D\n")
with open('Example2.txt','r') as testwritefile:
print(testwritefile.read())
with open('Example2.txt','r') as readfile:
with open('Example3.txt','w') as writefile:
for line in readfile:
writefile.write(line)
with open('Example3.txt','r') as testwritefile:
print(testwritefile.read())
# Write CSV file example
student_list = [{"Student ID": 1, "Gender": "F", "Name": "Emma"},
{"Student ID": 2, "Gender": "M", "Name": "John"},
{"Student ID": 3, "Gender": "F", "Name": "Linda"}]
# Write csv file
with open('Example_csv.csv','w') as writefile:
# Set header for each column
for col_header in list(student_list[0].keys()):
writefile.write(str(col_header) + ", ")
writefile.write("\n")
# Set value for each column
for student in student_list:
for col_ele in list(student.values()):
writefile.write(str(col_ele) + ", ")
writefile.write("\n")
# Print out the result csv
with open('Example_csv.csv','r') as testwritefile:
print(testwritefile.read())
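# Alternative sketch using Python's built-in csv module (same student_list as above);
# csv.DictWriter handles the header row and quoting for us.
import csv
with open('Example_csv2.csv', 'w', newline='') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=list(student_list[0].keys()))
    writer.writeheader()
    writer.writerows(student_list)
with open('Example_csv2.csv', 'r') as csvfile:
    print(csvfile.read())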
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can read the file to see if it worked
Step2: We can write multiple lines
Step3: The method .write() works similarly to the method .readline(), except that instead of reading a new line it writes one. The process is illustrated in the figure; the different colour coding of the grid represents a new line added to the file after each method call.
Step4: By setting the mode argument to append a you can append a new line as follows
Step5: You can verify the file has changed by running the following cell
Step6: We write a list to a .txt file as follows
Step7: We can verify the file is written by reading it and printing out the values
Step8: We can again append to the file by changing the second parameter to a. This adds the code
Step9: We can see the results of appending the file
Step10: Copy a file
Step11: We can read the file to see if everything works
Step12: After reading files, we can also write data into files and save them in different file formats like .txt, .csv, .xls (for excel files) etc. Let's take a look at an example.
|
5,405
|
<ASSISTANT_TASK:>
Python Code:
import dendropy
import pandas as pd
data = pd.read_csv('../Data/PyronParityData.csv', index_col=0, header=False)
taxa = dendropy.TaxonSet()
mle = dendropy.Tree.get_from_path('../TotalOpt/annotatedTO_0param_2598364.dated', 'newick', taxon_set=taxa, preserve_underscores=True)
for idx, nd in enumerate(mle.postorder_node_iter()):
if nd.label is None:
lookup = '{}'.format(nd.taxon)
nd.label = int(data.ix[lookup])
else:
pass
putative_c = []
putative_co = []
total = []
childs = []
for index, node in enumerate(mle.postorder_node_iter()):
total.append(index)
if node.parent_node is None:
pass
elif .5 < float(node.label) < 1 or float(node.label) == 0: #Is likely oviparous
if float(node.parent_node.label) < .05 : #List of nodes that demonstrate change away from oviparity.
if node.taxon is not None :
putative_co.append([node.parent_node.label, node.taxon])
else:
putative_co.append(node.parent_node.label)
for nd in node.child_nodes():
# print nd.taxon
pass
elif 0 < float(node.label) < .95 or float(node.label) == 1:
if float(node.parent_node.label) > .05:
putative_c.append([node.parent_node.label,node.taxon])
print len(putative_c), 'changes to viviparity'
print len(putative_co), 'reversions to oviparity'
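# Small summary sketch using the bookkeeping lists built above.
print len(total), 'nodes examined;', len(putative_c) + len(putative_co), 'putative parity changes in total'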
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read data and tree.
Step2: Iterate over the tips of the trees and annotate with data (in this case, whether the tip is viviparous or oviparous). T
Step3: The counting loop. If we have a tip that has data, append to either the oviparous or viviparous list, as apporpriate. If the node label is annotated as having over a 50% probability of being oviparous, add to the oviparous list. If the node is likely to be viviparous, add to the viviparous list.
|
5,406
|
<ASSISTANT_TASK:>
Python Code:
# !pip install phiflow
from phi.flow import *
x = math.stack({'Sun': (0, 0), 'Earth': (10, 0), 'Mars': (0, 12)}, instance('planets'))
x
vis.plot(PointCloud(x, bounds=Box(x=(-2, 12), y=(-1, 13))))
v = math.rotate_vector(x, PI/2)
v = math.divide_no_nan(v, math.vec_length(v))
vis.plot(PointCloud(x, values=v, bounds=Box(x=(-2, 12), y=(-1, 13))))
masses = math.stack({'Sun': 1000, 'Earth': 10, 'Mars': 10}, instance('planets'))
vis.plot(PointCloud(Sphere(x, radius=masses**0.333 * .2), bounds=Box(x=(-2, 12), y=(-1, 13))))
def simulate(x, v, dt=.5):
dx = x - math.rename_dims(x, 'planets', 'others')
a = - .01 * math.sum(math.divide_no_nan(math.rename_dims(masses, 'planets', 'others') * dx, math.vec_squared(dx) ** 1.5), 'others')
return x + v * dt, v + a * dt
xs, vs = [x], [v]
for i in range(100):
x, v = simulate(x, v)
xs.append(x)
vs.append(v)
xs = math.stack(xs, batch('time'))
vs = math.stack(vs, batch('time'))
xs
vis.plot(vis.overlay(
PointCloud(Sphere(xs, radius=masses ** 0.333 / 5), bounds=Box(x=(-20, 20), y=(-20, 20))),
PointCloud(xs, vs * 2, bounds=Box(x=(-20, 20), y=(-20, 20)))), animate='time', frame_time=100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We use the convenience import of Φ<sub>Flow</sub> which imports the core submodules, such as math and vis.
Step2: Let's define the initial positions for our planets.
Step3: We can plot these positions by constructing a PointCloud, using the bounds parameter to specify the axis range. Since the universe we are simulating is infinite, the bounds have no physical meaning in this case.
Step4: Next, let's give the planets initial velocities so that they circle the sun.
Step5: Next, we define the masses of our planets which will determine the gravitational pull.
Step6: To Simulate our system, we define a simple update step and run it repeatedly, collecting all intermediate states in the lists xs and vs.
Step7: Finally, let's plot the system trajectory as an animation!
Step8: Now we can specify this dimension via the animate argument.
|
5,407
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
# Install necessary dependencies
! pip3 install {USER_FLAG} --upgrade google-cloud-aiplatform
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
PROJECT_ID = "qwiklabs-gcp-00-b9e7121a76ba" # Replace your Project ID here
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-00-b9e7121a76ba" # Replace your Project ID here
! gcloud config set project $PROJECT_ID
# Import necessary librarary
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
BUCKET_URI = "gs://qwiklabs-gcp-00-b9e7121a76ba" # Replace your Bucket name here
REGION = "us-central1" # @param {type:"string"}
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://qwiklabs-gcp-00-b9e7121a76ba": # Replace your Bucket name here
BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
print(BUCKET_URI)
# Create your bucket
! gsutil mb -l $REGION $BUCKET_URI
# Give access to your Cloud Storage bucket
! gsutil ls -al $BUCKET_URI
# Import necessary libraries
import os
import sys
from google.cloud import aiplatform
from google.cloud.aiplatform import hyperparameter_tuning as hpt
%%writefile Dockerfile
FROM gcr.io/deeplearning-platform-release/tf2-gpu.2-5
WORKDIR /
# Installs hypertune library
RUN pip install cloudml-hypertune
# Copies the trainer code to the docker image.
COPY trainer /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
# Create trainer directory
! mkdir trainer
%%writefile trainer/task.py
import argparse
import hypertune
import tensorflow as tf
import tensorflow_datasets as tfds
def get_args():
"""Parses args. Must include all hyperparameters you want to tune."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate', required=True, type=float, help='learning rate')
parser.add_argument(
'--momentum', required=True, type=float, help='SGD momentum value')
parser.add_argument(
'--units',
required=True,
type=int,
help='number of units in last hidden layer')
parser.add_argument(
'--epochs',
required=False,
type=int,
default=10,
help='number of training epochs')
args = parser.parse_args()
return args
def preprocess_data(image, label):
"""Resizes and scales images."""
image = tf.image.resize(image, (150, 150))
return tf.cast(image, tf.float32) / 255., label
def create_dataset(batch_size):
"""Loads Horses Or Humans dataset and preprocesses data."""
data, info = tfds.load(
name='horses_or_humans', as_supervised=True, with_info=True)
# Create train dataset
train_data = data['train'].map(preprocess_data)
train_data = train_data.shuffle(1000)
train_data = train_data.batch(batch_size)
# Create validation dataset
validation_data = data['test'].map(preprocess_data)
validation_data = validation_data.batch(64)
return train_data, validation_data
def create_model(units, learning_rate, momentum):
"""Defines and compiles model."""
inputs = tf.keras.Input(shape=(150, 150, 3))
x = tf.keras.layers.Conv2D(16, (3, 3), activation='relu')(inputs)
x = tf.keras.layers.MaxPooling2D((2, 2))(x)
x = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPooling2D((2, 2))(x)
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPooling2D((2, 2))(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(units, activation='relu')(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=momentum),
metrics=['accuracy'])
return model
def main():
args = get_args()
# Create Strategy
strategy = tf.distribute.MirroredStrategy()
# Scale batch size
GLOBAL_BATCH_SIZE = 64 * strategy.num_replicas_in_sync
train_data, validation_data = create_dataset(GLOBAL_BATCH_SIZE)
# Wrap model variables within scope
with strategy.scope():
model = create_model(args.units, args.learning_rate, args.momentum)
# Train model
history = model.fit(
train_data, epochs=args.epochs, validation_data=validation_data)
# Define Metric
hp_metric = history.history['val_accuracy'][-1]
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=hp_metric,
global_step=args.epochs)
if __name__ == '__main__':
main()
# Set the IMAGE_URI
IMAGE_URI = f"gcr.io/{PROJECT_ID}/horse-human:hypertune"
# Build the docker image
! docker build -f Dockerfile -t $IMAGE_URI ./
# Push it to Google Container Registry:
! docker push $IMAGE_URI
# Define required specifications
worker_pool_specs = [
{
"machine_spec": {
"machine_type": "n1-standard-4",
"accelerator_type": "NVIDIA_TESLA_T4",
"accelerator_count": 2,
},
"replica_count": 1,
"container_spec": {"image_uri": IMAGE_URI},
}
]
metric_spec = {"accuracy": "maximize"}
parameter_spec = {
"learning_rate": hpt.DoubleParameterSpec(min=0.001, max=1, scale="log"),
"momentum": hpt.DoubleParameterSpec(min=0, max=1, scale="linear"),
"units": hpt.DiscreteParameterSpec(values=[64, 128, 512], scale=None),
}
print(BUCKET_URI)
# Create a CustomJob
JOB_NAME = "horses-humans-hyperparam-job" + TIMESTAMP
# TODO 1
my_custom_job = aiplatform.CustomJob(
display_name=JOB_NAME,
project=PROJECT_ID,
worker_pool_specs=worker_pool_specs,
staging_bucket=BUCKET_URI,
)
# Create and run HyperparameterTuningJob
# TODO 2
hp_job = aiplatform.HyperparameterTuningJob(
display_name=JOB_NAME,
custom_job=my_custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=15,
parallel_trial_count=3,
project=PROJECT_ID,
search_algorithm=None,
)
hp_job.run()
# Set this to true only if you'd like to delete your bucket
# TODO 3
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel
Step2: Set up your Google Cloud project
Step3: Otherwise, set your project ID here.
Step4: Set project ID
Step5: Timestamp
Step6: Create a Cloud Storage bucket
Step7: Only if your bucket doesn't already exist
Step8: Finally, validate access to your Cloud Storage bucket by examining its contents
Step9: Import libraries and define constants
Step10: Write Dockerfile
Step11: Create training application code
Step16: In the next cell, you write the contents of the training script, task.py. This file downloads the horses or humans dataset from TensorFlow datasets and trains a tf.keras functional model using MirroredStrategy from the tf.distribute module.
Step17: Build the Container
Step18: Create and run hyperparameter tuning job on Vertex AI
Step19: Create a CustomJob.
Step20: Then, create and run a HyperparameterTuningJob.
Step21: It will nearly take 50 mintues to complete the job successfully.
|
5,408
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
a = np.array([-2, 3, 4, -5, 5])
print(a)
a[[1, 3]]
a[a > 0]
print(a)
print(a > 0)
a[(a > 0) & (a < 5)]
pop_dict = {'Germany': 81.3,
'Belgium': 11.3,
'France': 64.3,
'United Kingdom': 64.9,
'Netherlands': 16.9}
population = pd.Series(pop_dict)
print(population)
population[['Netherlands', 'Germany']]
population[population > 20]
population[:2]
data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'],
'population': [11.3, 64.3, 81.3, 16.9, 64.9],
'area': [30510, 671308, 357050, 41526, 244820],
'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']}
countries = pd.DataFrame(data)
countries
countries = countries.set_index('country')
countries
countries['area']
countries[['area', 'population']]
countries['France':'Netherlands']
countries[countries['area'] > 100000]
countries.loc['Germany', 'area']
countries.loc['France':'Germany', ['area', 'population']]
countries.iloc[:2,1:3]
countries2 = countries.copy()
countries2.loc['Belgium':'Germany', 'population'] = 10
countries2
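# Another small sketch: boolean masks combine with .loc for assignment as well,
# e.g. capping 'population' at a hypothetical threshold of 50 (million).
countries3 = countries.copy()
countries3.loc[countries3['population'] > 50, 'population'] = 50
countries3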
cast = pd.read_csv('data/cast.csv')
cast.head()
titles = pd.read_csv('data/titles.csv')
titles.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: More on NumPy indexing
Step2: Fancy indexing
Step3: Boolean indexing
Step4: Note that the index array has the same size as and type of boolean
Step5: Multiple criteria can be also combine in one query
Step6: <div class="alert alert-success">
Step7: We can use fancy indexing with the rich index
Step8: Similarly, boolean indexing can be used to filter the Series. Lets select countries with population of more than 20 millions
Step9: You can also do position-based indexing by using integers instead of labels
Step10: Indexing DataFrame
Step11: Some notes on selecting data
Step12: or multiple columns using fancy indexing
Step13: But, slicing accesses the rows
Step14: We can also select rows similarly to the boolean indexing in numpy. The boolean mask should be 1-dimensional and the same length as the thing being indexed. Boolean indexing of DataFrame can be used like the WHERE clause of SQL to select rows matching some criteria
Step15: So as a summary, [] provides the following convenience shortcuts
Step16: But the row or column indexer can also be a list, slice, boolean array, ..
Step17: Selecting by position with iloc works similarly to indexing numpy arrays
Step18: The different indexing methods can also be used to assign data
Step19: <div class="alert alert-success">
|
5,409
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
objective = np.poly1d([1.3, 4.0, 0.6])
print objective
import scipy.optimize as opt
x_ = opt.fmin(objective, [3])
print "solved: x={}".format(x_)
%matplotlib inline
x = np.linspace(-4, 1, 101)
import matplotlib.pylab as mpl
mpl.plot(x, objective(x))
mpl.plot(x_, objective(x_), 'ro')
import scipy.special as ss
import scipy.optimize as opt
import numpy as np
import matplotlib.pylab as mpl
x = np.linspace(2, 7, 200)
# 1st order Bessel
j1x = ss.j1(x)
mpl.plot(x, j1x)
# use scipy.optimize's more modern "results object" interface
result = opt.minimize_scalar(ss.j1, method="bounded", bounds=[2, 4])
j1_min = ss.j1(result.x)
mpl.plot(result.x, j1_min,'ro')
import mystic.models as models
print(models.rosen.__doc__)
!mystic_model_plotter.py mystic.models.rosen -f -d -x 1 -b "-3:3:.1, -1:5:.1, 1"
import mystic
mystic.model_plotter(mystic.models.rosen, fill=True, depth=True, scale=1, bounds="-3:3:.1, -1:5:.1, 1")
import scipy.optimize as opt
import numpy as np
# initial guess
x0 = [1.3, 1.6, -0.5, -1.8, 0.8]
result = opt.minimize(opt.rosen, x0)
print result.x
# number of function evaluations
print result.nfev
# again, but this time provide the derivative
result = opt.minimize(opt.rosen, x0, jac=opt.rosen_der)
print result.x
# number of function evaluations and derivative evaluations
print result.nfev, result.njev
print ''
# however, note for a different x0...
for i in range(5):
x0 = np.random.randint(-20,20,5)
result = opt.minimize(opt.rosen, x0, jac=opt.rosen_der)
print "{} @ {} evals".format(result.x, result.nfev)
# http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html#tutorial-sqlsp
'''
Maximize: f(x) = 2*x0*x1 + 2*x0 - x0**2 - 2*x1**2
Subject to: x0**3 - x1 == 0
x1 >= 1
'''
import numpy as np
def objective(x, sign=1.0):
return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
def derivative(x, sign=1.0):
dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
dfdx1 = sign*(2*x[0] - 4*x[1])
return np.array([ dfdx0, dfdx1 ])
# unconstrained
result = opt.minimize(objective, [-1.0,1.0], args=(-1.0,),
jac=derivative, method='SLSQP', options={'disp': True})
print("unconstrained: {}".format(result.x))
cons = ({'type': 'eq',
'fun' : lambda x: np.array([x[0]**3 - x[1]]),
'jac' : lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
{'type': 'ineq',
'fun' : lambda x: np.array([x[1] - 1]),
'jac' : lambda x: np.array([0.0, 1.0])})
# constrained
result = opt.minimize(objective, [-1.0,1.0], args=(-1.0,), jac=derivative,
constraints=cons, method='SLSQP', options={'disp': True})
print("constrained: {}".format(result.x))
# from scipy.optimize.minimize documentation
'''
**Unconstrained minimization**
Method *Nelder-Mead* uses the Simplex algorithm [1]_, [2]_. This
algorithm has been successful in many applications but other algorithms
using the first and/or second derivatives information might be preferred
for their better performances and robustness in general.
Method *Powell* is a modification of Powell's method [3]_, [4]_ which
is a conjugate direction method. It performs sequential one-dimensional
minimizations along each vector of the directions set (`direc` field in
`options` and `info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken.
Method *CG* uses a nonlinear conjugate gradient algorithm by Polak and
Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp.
120-122. Only the first derivatives are used.
Method *BFGS* uses the quasi-Newton method of Broyden, Fletcher,
Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives
only. BFGS has proven good performance even for non-smooth
optimizations. This method also returns an approximation of the Hessian
inverse, stored as `hess_inv` in the OptimizeResult object.
Method *Newton-CG* uses a Newton-CG algorithm [5]_ pp. 168 (also known
as the truncated Newton method). It uses a CG method to the compute the
search direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm.
Method *Anneal* uses simulated annealing, which is a probabilistic
metaheuristic algorithm for global optimization. It uses no derivative
information from the function being optimized.
Method *dogleg* uses the dog-leg trust-region algorithm [5]_
for unconstrained minimization. This algorithm requires the gradient
and Hessian; furthermore the Hessian is required to be positive definite.
Method *trust-ncg* uses the Newton conjugate gradient trust-region
algorithm [5]_ for unconstrained minimization. This algorithm requires
the gradient and either the Hessian or a function that computes the
product of the Hessian with a given vector.
**Constrained minimization**
Method *L-BFGS-B* uses the L-BFGS-B algorithm [6]_, [7]_ for bound
constrained minimization.
Method *TNC* uses a truncated Newton algorithm [5]_, [8]_ to minimize a
function with variables subject to bounds. This algorithm uses
gradient information; it is also called Newton Conjugate-Gradient. It
differs from the *Newton-CG* method described above as it wraps a C
implementation and allows each variable to be given upper and lower
bounds.
Method *COBYLA* uses the Constrained Optimization BY Linear
Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is
based on linear approximations to the objective function and each
constraint. The method wraps a FORTRAN implementation of the algorithm.
Method *SLSQP* uses Sequential Least SQuares Programming to minimize a
function of several variables with any combination of bounds, equality
and inequality constraints. The method wraps the SLSQP Optimization
subroutine originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into large
floating values.
'''
import scipy.optimize as opt
# constrained: linear (i.e. A*x + b)
print opt.cobyla.fmin_cobyla
print opt.linprog
# constrained: quadratic programming (i.e. up to x**2)
print opt.fmin_slsqp
# http://cvxopt.org/examples/tutorial/lp.html
'''
minimize: f = 2*x0 + x1
subject to:
-x0 + x1 <= 1
x0 + x1 >= 2
x1 >= 0
x0 - 2*x1 <= 4
'''
import cvxopt as cvx
from cvxopt import solvers as cvx_solvers
A = cvx.matrix([ [-1.0, -1.0, 0.0, 1.0], [1.0, -1.0, -1.0, -2.0] ])
b = cvx.matrix([ 1.0, -2.0, 0.0, 4.0 ])
cost = cvx.matrix([ 2.0, 1.0 ])
sol = cvx_solvers.lp(cost, A, b)
print(sol['x'])
# http://cvxopt.org/examples/tutorial/qp.html
'''
minimize: f = 2*x1**2 + x2**2 + x1*x2 + x1 + x2
subject to:
x1 >= 0
x2 >= 0
x1 + x2 == 1
'''
import cvxopt as cvx
from cvxopt import solvers as cvx_solvers
Q = 2*cvx.matrix([ [2, .5], [.5, 1] ])
p = cvx.matrix([1.0, 1.0])
G = cvx.matrix([[-1.0,0.0],[0.0,-1.0]])
h = cvx.matrix([0.0,0.0])
A = cvx.matrix([1.0, 1.0], (1,2))
b = cvx.matrix(1.0)
sol = cvx_solvers.qp(Q, p, G, h, A, b)
print(sol['x'])
import scipy.optimize as opt
# probabilstic solvers, that use random hopping/mutations
print opt.differential_evolution
print opt.basinhopping
print opt.anneal
import scipy.optimize as opt
# bounds instead of an initial guess
bounds = [(-10., 10)]*5
for i in range(10):
result = opt.differential_evolution(opt.rosen, bounds)
print result.x,
# number of function evaluations
print '@ {} evals'.format(result.nfev)
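# A quick sketch with basinhopping (another stochastic solver listed above); unlike
# differential_evolution it starts from an initial guess rather than from bounds.
x0 = np.random.uniform(-10., 10., 5)
result = opt.basinhopping(opt.rosen, x0)
print result.x, '@ {} evals'.format(result.nfev)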
import scipy.optimize as opt
import scipy.stats as stats
import numpy as np
# Define the function to fit.
def function(x, a, b, f, phi):
result = a * np.exp(-b * np.sin(f * x + phi))
return result
# Create a noisy data set around the actual parameters
true_params = [3, 2, 1, np.pi/4]
print "target parameters: {}".format(true_params)
x = np.linspace(0, 2*np.pi, 25)
exact = function(x, *true_params)
noisy = exact + 0.3*stats.norm.rvs(size=len(x))
# Use curve_fit to estimate the function parameters from the noisy data.
initial_guess = [1,1,1,1]
estimated_params, err_est = opt.curve_fit(function, x, noisy, p0=initial_guess)
print "solved parameters: {}".format(estimated_params)
# err_est is an estimate of the covariance matrix of the estimates
print "covarance: {}".format(err_est.diagonal())
import matplotlib.pylab as mpl
mpl.plot(x, noisy, 'ro')
mpl.plot(x, function(x, *estimated_params))
import numpy as np
import scipy.optimize as opt
def system(x,a,b,c):
x0, x1, x2 = x
eqs= [
3 * x0 - np.cos(x1*x2) + a, # == 0
x0**2 - 81*(x1+0.1)**2 + np.sin(x2) + b, # == 0
np.exp(-x0*x1) + 20*x2 + c # == 0
]
return eqs
# coefficients
a = -0.5
b = 1.06
c = (10 * np.pi - 3.0) / 3
# initial guess
x0 = [0.1, 0.1, -0.1]
# Solve the system of non-linear equations.
result = opt.root(system, x0, args=(a, b, c))
print "root:", result.x
print "solution:", result.fun
import numpy as np
import scipy.stats as stats
# Create clean data.
x = np.linspace(0, 4.0, 100)
y = 1.5 * np.exp(-0.2 * x) + 0.3
# Add a bit of noise.
noise = 0.1 * stats.norm.rvs(size=100)
noisy_y = y + noise
# Fit noisy data with a linear model.
linear_coef = np.polyfit(x, noisy_y, 1)
linear_poly = np.poly1d(linear_coef)
linear_y = linear_poly(x)
# Fit noisy data with a quadratic model.
quad_coef = np.polyfit(x, noisy_y, 2)
quad_poly = np.poly1d(quad_coef)
quad_y = quad_poly(x)
import matplotlib.pylab as mpl
mpl.plot(x, noisy_y, 'ro')
mpl.plot(x, linear_y)
mpl.plot(x, quad_y)
#mpl.plot(x, y)
import mystic.models as models
print models.zimmermann.__doc__
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The "optimizer"
Step2: Additional components
Step3: The gradient and/or hessian
Step4: The penalty functions
Step5: Optimizer classifications
Step6: Notice how much nicer it is to see the optimizer "trajectory". Now, instead of a single number, we have the path the optimizer took. scipy.optimize has a version of this, with options={'retall'
Step7: Gradient descent and steepest descent
Step8: Not Covered
Step9: Parameter estimation
Step10: Standard diagnostic tools
|
5,410
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from math import pi
import matplotlib.pyplot as plot
%matplotlib notebook
x = np.arange(-5, 5.001, 0.0001)
y = (x**4)-(16*(x**2)) + 16
plot.plot(x,y,'c')
plot.grid(True)
print('Para a f(x) = ax^2 + bx+ c, diga os valores de a, b e c:\n')
a = float(input('Valor de a: '))
b = float(input('Valor de b: '))
c = float(input('Valor de c: '))
delta = b**2 - 4*a*c
xmax = (-b)/(2*a)
x = np.arange(xmax-4, xmax+4.001, 0.001)
y = a*(x**2) + b*x + c
plot.plot(x, y, 'c')
plot.grid(True)
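# Optional extra (a sketch): delta computed above is the discriminant, so we can
# also report the real roots of the quadratic when they exist.
if delta >= 0:
    root1 = (-b + delta**0.5) / (2*a)
    root2 = (-b - delta**0.5) / (2*a)
    print('Real roots:', root1, root2)
else:
    print('No real roots (delta < 0)')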
t = np.arange(0, 2*pi + 0.001, 0.001)
x = 0 + 2*np.sin(t)
y = 0 + 2*np.cos(t)
plot.plot(x, y, 'c')
plot.axis('equal')
t = np.arange(0, 2*pi+0.001, 0.001)
x = 2+2*np.sin(t)
y = 2+2*np.cos(t)
plot.plot(x, y, 'c')
plot.axis('equal')
plot.grid(True)
t = np.arange(0, 2*pi+0.001, 0.001)
for r in np.arange(1, 13.5, 0.5):
x = r*np.sin(t)
y = r*np.cos(t)
plot.plot(x, y, 'c')
plot.axis('equal')
plot.grid(True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Questão 2
Step2: Questão 3
Step3: Questão 4
|
5,411
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
%matplotlib inline
from datetime import datetime
import numpy as np
import utils.data_cube_utilities.dc_utilities as utils
from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask
from utils.data_cube_utilities.dc_mosaic import create_mosaic#, ls8_unpack_qa
from utils.data_cube_utilities.dc_water_classifier import wofs_classify
from datacube.utils.aws import configure_s3_access
configure_s3_access(requester_pays=True)
# Initialize data cube object
import datacube
dc = datacube.Datacube(app='dc-coastal-erosion')
start_time = datetime.now()
print("Start time: " + str(start_time))
# Set query parameters
platform = 'LANDSAT_8'
product = 'ls8_usgs_sr_scene'
collection = 'c1'
level = 'l2'
# Select minimum and maximum longitudes and latitudes and the time range.
# Ghana
# Coastline east of Accra
lon = (0.0520, 0.3458)
lat = (5.6581, 5.8113)
first_year = 2013
first_time_range = (f'{first_year}-01-01', f'{first_year}-12-31')
last_year = 2014
second_time_range = (f'{last_year}-01-01', f'{last_year}-12-31')
from utils.data_cube_utilities.dc_display_map import display_map
display_map(lat, lon)
measurements = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2', 'pixel_qa']
common_load_params = dict(platform=platform, product=product,
lon=lon, lat=lat, measurements=measurements,
dask_chunks={'time':1, 'latitude':1000, 'longitude':1000})
# Retrieve data from Data Cube
first_dataset = dc.load(time=first_time_range, **common_load_params)
# Retrieve data from Data Cube
second_dataset = dc.load(time=second_time_range, **common_load_params)
from utils.data_cube_utilities.dc_utilities import ignore_warnings
plt_col_lvl_params = dict(platform=platform, collection=collection, level=level)
# Only keep pixels that are clear or have water.
first_clean_mask = landsat_qa_clean_mask(first_dataset, **plt_col_lvl_params)
second_clean_mask = landsat_qa_clean_mask(second_dataset, **plt_col_lvl_params)
# Remove noise from images by using appropriate data within the dataset to replace "dirty" data.
first_mosaic = ignore_warnings(create_mosaic, first_dataset, clean_mask=first_clean_mask)
second_mosaic = ignore_warnings(create_mosaic, second_dataset, clean_mask=second_clean_mask)
first_water_class = \
ignore_warnings(wofs_classify, first_mosaic, mosaic=True, no_data=np.nan)
second_water_class = \
ignore_warnings(wofs_classify, second_mosaic, mosaic=True, no_data=np.nan)
first_wofs = first_water_class.wofs.values
second_wofs = second_water_class.wofs.values
coastal_change = second_water_class - first_water_class
import matplotlib.pyplot as plt
def plot_data_array_with_aspect(da):
fig, ax = plt.subplots(figsize=(15,8))
ax.set_aspect('equal')
da.plot()
# -1 -> water to coast
# 0 -> no change
# 1 -> coast to water (Coastal Erosion)
plot_data_array_with_aspect( coastal_change.wofs )
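# A small summary sketch: count the pixels that changed in each direction
# (1 = coast to water, i.e. erosion; -1 = water to coast).
change_vals = coastal_change.wofs.values
print('Pixels changed coast -> water (erosion):', int(np.sum(change_vals == 1)))
print('Pixels changed water -> coast:', int(np.sum(change_vals == -1)))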
first_coastline = np.zeros(first_wofs.shape)
for i in range(first_wofs.shape[0]):
for j in range(first_wofs.shape[1]):
pixel = first_wofs[i,j]
if pixel == 0 and np.nansum(first_wofs[i-1:i+2, j-1:j+2]) >= 1 and np.nansum(first_wofs[i-1:i+2, j-1:j+2]) <= 5:
first_coastline[i,j] = 1
second_coastline = np.zeros(second_wofs.shape)
for i in range(second_wofs.shape[0]):
for j in range(second_wofs.shape[1]):
pixel = second_wofs[i,j]
if pixel == 0 and np.nansum(second_wofs[i-1:i+2, j-1:j+2]) >= 1 and np.nansum(second_wofs[i-1:i+2, j-1:j+2]) <= 5:
second_wofs[i,j] = 1
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from pylab import imshow
fig = plt.figure(figsize = (15,8))
a=fig.add_subplot(1,2,1)
imgplot = plt.imshow(first_wofs, cmap='Blues',
extent=[first_water_class.longitude.values.min(),
first_water_class.longitude.values.max(),
first_water_class.latitude.values.min(),
first_water_class.latitude.values.max()])
a.set_title(f'wofs - {first_year}')
plt.colorbar(ticks=[0,1], orientation ='horizontal')
a=fig.add_subplot(1,2,2)
imgplot = plt.imshow(second_wofs, cmap='Blues',
extent=[second_water_class.longitude.values.min(),
second_water_class.longitude.values.max(),
second_water_class.latitude.values.min(),
second_water_class.latitude.values.max()])
#imgplot.set_clim(0.0,1.0)
a.set_title(f'wofs - {last_year}')
plt.colorbar(ticks=[0,1], orientation='horizontal')
plt.savefig('wofs_compare.png')
fig = plt.figure(figsize = (15,8))
plt.imshow(first_coastline, cmap='Greens')
fig = plt.figure(figsize = (15,8))
plt.imshow(second_coastline, cmap='Purples')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <span id="coastal_change_classifier_plat_prod">Choose Platform and Product ▴</span>
Step2: <span id="coastal_change_classifier_define_extents">Define the Extents of the Analysis ▴</span>
Step3: Visualize the selected area
Step4: <span id="coastal_change_classifier_retrieve_data">Load Data from the Data Cube and Create Composites ▴</span>
Step5: <span id="coastal_change_classifier_water_cls_and_coastal_change">Obtain Water Classifications and Coastal Change ▴</span>
Step6: Show the coastal change
Step7: Obtain the coastlines for the first and second time periods
Step8: Show the water classifications for the first and second time periods
Step9: Show the coastline for the first time period
Step10: Show the coastline for the second time period
|
5,412
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from sklearn import metrics, preprocessing, linear_model
dirRawData = '/home/john/Projects/RepoNumerAI/data/raw/numerai_datasets/19_03_2017/'
dirOutputData = '/home/john/Projects/RepoNumerAI/data/processed/'
# Set seed for reproducibility
np.random.seed(0)
print("Loading data...")
# Load the data from the CSV files
training_data = pd.read_csv(dirRawData + 'numerai_training_data.csv', header=0)
prediction_data = pd.read_csv(dirRawData + 'numerai_tournament_data.csv', header=0)
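# Separate the training target from the feature columns, and the tournament ids from the prediction features.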
Y = training_data['target']
X = training_data.drop('target', axis=1)
t_id = prediction_data['t_id']
x_prediction = prediction_data.drop('t_id', axis=1)
model = linear_model.LogisticRegression(n_jobs=-1)
print("Training...")
# Your model is trained on the numerai_training_data
model.fit(X, Y)
print("Predicting...")
# Your trained model is now used to make predictions on the numerai_tournament_data
# The model returns two columns: [probability of 0, probability of 1]
# We are just interested in the probability that the target is 1.
y_prediction = model.predict_proba(x_prediction)
results = y_prediction[:, 1]
results_df = pd.DataFrame(data={'probability':results})
joined = pd.DataFrame(t_id).join(results_df)
print("Writing predictions to predictions.csv")
# Save the predictions out to a CSV file
joined.to_csv(dirOutputData + "predictions.csv", index=False)
# Now you can upload these predictions on numer.ai
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set paths to the Data
Step2: Transform the loaded CSV data into numpy arrays
Step3: This is your model that will learn to predict
|
5,413
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
FIELDS = {
'auth_read': 'user', # Credentials used for reading data.
'account_ids': [],
'dataset': '', # Dataset to be written to in BigQuery.
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'ga_settings_download': {
'description': 'Will create tables with format ga_* to hold each endpoint via a call to the API list function.',
'auth': 'user',
'accounts': {'field': {'name': 'account_ids','kind': 'integer_list','order': 1,'default': []}},
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 2,'default': '','description': 'Dataset to be written to in BigQuery.'}}
}
}
]
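# Substitute the user-supplied FIELDS values into the task definition, then run the recipe.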
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Get Cloud Project ID
Step2: 3. Get Client Credentials
Step3: 4. Enter Google Analytics Timeline Parameters
Step4: 5. Execute Google Analytics Timeline
|
5,414
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'fgoals-f3-h', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
5,415
|
<ASSISTANT_TASK:>
Python Code:
!pip install stim
import stim
circuit = stim.Circuit()
# First, the circuit will initialize a Bell pair.
circuit.append_operation("H", [0])
circuit.append_operation("CNOT", [0, 1])
# Then, the circuit will measure both qubits of the Bell pair in the Z basis.
circuit.append_operation("M", [0, 1])
# Let's see the circuit's representation using stim's circuit language:
print(repr(circuit))
sampler = circuit.compile_sampler()
print(sampler.sample(shots=10))
# Indicate the two previous measurements are supposed to consistently agree.
circuit.append_operation("DETECTOR", [stim.target_rec(-1), stim.target_rec(-2)])
print(repr(circuit))
sampler = circuit.compile_detector_sampler()
print(sampler.sample(shots=5))
circuit = stim.Circuit("""
    H 0
    CX 0 1
    X_ERROR(0.2) 0 1
    M 0 1
    DETECTOR rec[-1] rec[-2]
""")
sampler = circuit.compile_detector_sampler()
print(sampler.sample(shots=10))
import numpy as np
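# Empirical detector-flip rate; with independent X errors of probability 0.2 on each qubit, the detector fires when exactly one flips: 2*0.2*0.8 = 0.32.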
print(np.sum(sampler.sample(shots=10**6)) / 10**6)
circuit = stim.Circuit.generated(
"repetition_code:memory",
rounds=100,
distance=9,
before_round_data_depolarization=0.03)
print(repr(circuit))
sampler = circuit.compile_sampler()
one_sample = sampler.sample(shots=1)[0]
for k in range(0, len(one_sample), 9):
timeslice = one_sample[k:k+9]
print("".join("_1"[e] for e in timeslice))
detector_sampler = circuit.compile_detector_sampler()
one_sample = detector_sampler.sample(shots=1)[0]
for k in range(0, len(one_sample), 9):
timeslice = one_sample[k:k+9]
print("".join("_1"[e] for e in timeslice))
print(repr(circuit.detector_error_model()))
!pip install pymatching
##########################################################
#################### BEGIN GLUE CODE #####################
##########################################################
import math
import networkx as nx
import pymatching
from typing import Callable, List
def predict_observable_errors_using_pymatching(circuit: stim.Circuit,
det_samples: np.ndarray,
) -> np.ndarray:
    """Turn detection events into predicted observable errors."""
error_model = circuit.detector_error_model(decompose_errors=True)
matching_graph = detector_error_model_to_pymatching_graph(error_model)
num_shots = det_samples.shape[0]
num_obs = circuit.num_observables
num_dets = circuit.num_detectors
assert det_samples.shape[1] == num_dets
predictions = np.zeros(shape=(num_shots, num_obs), dtype=np.bool8)
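    # Decode each shot separately, padding its detection events with an entry for the virtual boundary node.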
for k in range(num_shots):
expanded_det = np.resize(det_samples[k], num_dets + 1)
expanded_det[-1] = 0
predictions[k] = matching_graph.decode(expanded_det)
return predictions
def detector_error_model_to_pymatching_graph(model: stim.DetectorErrorModel) -> pymatching.Matching:
    """Convert a stim error model into a pymatching graph."""
g = detector_error_model_to_nx_graph(model)
num_detectors = model.num_detectors
num_observables = model.num_observables
# Add spandrels to the graph to ensure pymatching will accept it.
# - Make sure there's only one connected component.
# - Make sure no detector nodes are skipped.
# - Make sure no observable nodes are skipped.
for k in range(num_detectors):
g.add_node(k)
g.add_node(num_detectors + 1)
for k in range(num_detectors + 1):
g.add_edge(k, num_detectors + 1, weight=9999999999)
g.add_edge(num_detectors, num_detectors + 1, weight=9999999999, qubit_id=list(range(num_observables)))
return pymatching.Matching(g)
def detector_error_model_to_nx_graph(model: stim.DetectorErrorModel) -> nx.Graph:
    """Convert a stim error model into a NetworkX graph."""
g = nx.Graph()
boundary_node = model.num_detectors
g.add_node(boundary_node, is_boundary=True, coords=[-1, -1, -1])
def handle_error(p: float, dets: List[int], frame_changes: List[int]):
if p == 0:
return
if len(dets) == 0:
# No symptoms for this error.
# Code probably has distance 1.
# Accept it and keep going, though of course decoding will probably perform terribly.
return
if len(dets) == 1:
dets = [dets[0], boundary_node]
if len(dets) > 2:
raise NotImplementedError(
f"Error with more than 2 symptoms can't become an edge or boundary edge: {dets!r}.")
if g.has_edge(*dets):
edge_data = g.get_edge_data(*dets)
old_p = edge_data["error_probability"]
old_frame_changes = edge_data["qubit_id"]
# If frame changes differ, the code has distance 2; just keep whichever was first.
if set(old_frame_changes) == set(frame_changes):
p = p * (1 - old_p) + old_p * (1 - p)
g.remove_edge(*dets)
g.add_edge(*dets, weight=math.log((1 - p) / p), qubit_id=frame_changes, error_probability=p)
def handle_detector_coords(detector: int, coords: np.ndarray):
g.add_node(detector, coords=coords)
eval_model(model, handle_error, handle_detector_coords)
return g
def eval_model(
model: stim.DetectorErrorModel,
handle_error: Callable[[float, List[int], List[int]], None],
handle_detector_coords: Callable[[int, np.ndarray], None]):
    """Interprets the error model instructions, taking care of loops and shifts.

    Makes callbacks as error mechanisms are declared, and also when detector
    coordinate data is declared.
    """
det_offset = 0
coords_offset = np.zeros(100, dtype=np.float64)
def _helper(m: stim.DetectorErrorModel, reps: int):
nonlocal det_offset
nonlocal coords_offset
for _ in range(reps):
for instruction in m:
if isinstance(instruction, stim.DemRepeatBlock):
_helper(instruction.body_copy(), instruction.repeat_count)
elif isinstance(instruction, stim.DemInstruction):
if instruction.type == "error":
dets: List[int] = []
frames: List[int] = []
t: stim.DemTarget
p = instruction.args_copy()[0]
for t in instruction.targets_copy():
if t.is_relative_detector_id():
dets.append(t.val + det_offset)
elif t.is_logical_observable_id():
frames.append(t.val)
elif t.is_separator():
# Treat each component of a decomposed error as an independent error.
# (Ideally we could configure some sort of correlated analysis; oh well.)
handle_error(p, dets, frames)
frames = []
dets = []
# Handle last component.
handle_error(p, dets, frames)
elif instruction.type == "shift_detectors":
det_offset += instruction.targets_copy()[0]
a = np.array(instruction.args_copy())
coords_offset[:len(a)] += a
elif instruction.type == "detector":
a = np.array(instruction.args_copy())
for t in instruction.targets_copy():
handle_detector_coords(t.val + det_offset, a + coords_offset[:len(a)])
elif instruction.type == "logical_observable":
pass
else:
raise NotImplementedError()
else:
raise NotImplementedError()
_helper(model, 1)
##########################################################
##################### END GLUE CODE ######################
##########################################################
def count_logical_errors(circuit: stim.Circuit, num_shots: int) -> int:
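    # Sample detector events together with the true observable flips, decode, and count shots where the prediction disagrees with the truth.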
shots = circuit.compile_detector_sampler().sample(num_shots, append_observables=True)
detector_parts = shots[:, :circuit.num_detectors]
actual_observable_parts = shots[:, circuit.num_detectors:]
predicted_observable_parts = predict_observable_errors_using_pymatching(circuit, detector_parts)
num_errors = 0
for actual, predicted in zip(actual_observable_parts, predicted_observable_parts):
if not np.array_equal(actual, predicted):
num_errors += 1
return num_errors
circuit = stim.Circuit.generated("repetition_code:memory", rounds=100, distance=9, before_round_data_depolarization=0.03)
num_shots = 1000
num_logical_errors = count_logical_errors(circuit, num_shots)
print("logical_error_rate at 3%:", num_logical_errors / num_shots)
num_naive_logical_errors = np.sum(circuit.compile_detector_sampler().sample(shots=num_shots, append_observables=True)[:, -1])
print("naive logical error rate at 3%:", num_naive_logical_errors / num_shots)
circuit = stim.Circuit.generated(
"repetition_code:memory",
rounds=100,
distance=9,
before_round_data_depolarization=0.13)
num_shots = 1000
num_logical_errors = count_logical_errors(circuit, num_shots)
print("logical_error_rate at 13%:", num_logical_errors / num_shots)
import matplotlib.pyplot as plt
num_shots = 1000
for d in [3, 5, 7]:
xs = []
ys = []
for noise in [0.1, 0.2, 0.3, 0.4, 0.5]:
circuit = stim.Circuit.generated(
"repetition_code:memory",
rounds=d * 3,
distance=d,
before_round_data_depolarization=noise)
xs.append(noise)
ys.append(count_logical_errors(circuit, num_shots) / num_shots)
plt.plot(xs, ys, label="d=" + str(d))
plt.semilogy()
plt.xlabel("physical error rate")
plt.ylabel("logical error rate")
plt.legend()
plt.show()
print(stim.Circuit.generated(
"surface_code:unrotated_memory_z",
rounds=100,
distance=3,
after_clifford_depolarization=0.001,
after_reset_flip_probability=0.001,
before_measure_flip_probability=0.001,
before_round_data_depolarization=0.001))
num_shots = 1000
for d in [3, 5, 7]:
xs = []
ys = []
for noise in [0.0025, 0.0050, 0.0075, 0.0100]:
circuit = stim.Circuit.generated(
"surface_code:unrotated_memory_z",
rounds=d * 3,
distance=d,
after_clifford_depolarization=noise,
after_reset_flip_probability=noise,
before_measure_flip_probability=noise,
before_round_data_depolarization=noise)
xs.append(noise)
ys.append(count_logical_errors(circuit, num_shots) / num_shots)
plt.plot(xs, ys, label="d=" + str(d))
plt.semilogy()
plt.xlabel("physical error rate")
plt.ylabel("logical error rate")
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Create a simple circuit, and sample from it.
Step2: You can sample from the circuit by using the circuit.compile_sampler() method to get a sampler object and then calling sample on that object. For large circuits (thousands of qubits, millions of operations), it may take a few seconds to create the sampler (because it involves performing a stabilizer tableau simulation of the circuit to get a reference sample). Once the reference sample is acquired, the sampler is returned and samples can be acquired in bulk very cheaply.
Step3: Notice how there are ten rows (because you took ten shots) with two results per row (because there were two measurements in the circuit).
Step4: A slightly subtle point about detectors is that they only assert the parity is consistent.
Step6: There are 5 rows in the results, because you took 5 shots.
Step7: Now that you've put noise before the measurements, try sampling some more detector shots and see what happens
Step8: It's no longer all zeroes (...unless you got pretty lucky).
Step9: As you can see, the directly estimated value is close to the expected value $0.32$.
Step10: The circuits generated by Stim include a lot of "nice to have" features.
Step11: See how the 1s seem to come in pairs of streaks?
Step12: Notice how the 1s tend to come in pairs, except near the sides.
Step17: This format is easier for decoders to consume than a raw circuit, because everything is explained in terms of observable symptoms and hidden symptoms, which is how decoders usually conceptualize the problem space.
Step18: A notable detail about this glue code is that it calls circuit.detector_error_model(decompose_errors=True) on the circuit you give it, instead of just circuit.detector_error_model().
Step19: And then try it on your repetition code circuit
Step20: Contrast this smooth sailing with what you get from doing no error correction, and just assuming that the logical observable is correct
Step21: Wow, the correction is working a whole lot better than the naive approach.
Step22: Ah. The existence of failure. Perfect.
Step23: From the results here you can see that the repetition code has an amazingly high threshold! Somewhere around 30%-40%. Well... it's not quite so amazing when you remember that you're using a phenomenological noise model (instead of a circuit level noise model) and also that you're inserting depolarizing errors instead of bit flip errors (the repetition code is immune to Z errors, and when a depolarizing error occurs it's a Z error one third of the time).
Step24: You're specifying several more error parameters now, in order to get circuit level noise instead of phenomenological noise.
|
5,416
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%ls
%ls rr-intro-data-v0.2/intro/data/
gap_5060 = pd.read_csv('rr-intro-data-v0.2/intro/data/gapminder-5060.csv')
gap_5060_CA = gap_5060.loc[gap_5060['country'] == 'Canada']
%matplotlib inline
gap_5060_CA.plot(kind='line', x='year', y='lifeExp')
pass
gap_5060.loc[(gap_5060['country'] == 'Canada') & (gap_5060['year'] == 1957)]
gap_5060.loc[(gap_5060['country'] == 'Canada') & (gap_5060['year'] == 1957), 'lifeExp'] = 69.96
gap_5060.loc[(gap_5060['country'] == 'Canada') & (gap_5060['year'] == 1957)]
gap_5060_CA = gap_5060.loc[gap_5060['country'] == 'Canada']
gap_5060_CA.plot(kind='line', x='year', y='lifeExp')
pass
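# Build a boolean mask for the three countries, then index by country and year so each country can be unstacked into its own series.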
loc = gap_5060['country'].isin(['Canada','United States','Mexico'])
us_mexico_ca = gap_5060.loc[loc]
indexed_by_country = us_mexico_ca.set_index(['country','year'])
indexed_by_country
indexed_by_country.unstack(level='country').plot(kind='line',y='lifeExp')
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notebook
Step2: Both the magic functions and the python ones support tab-completion
Step3: Data
Step4: Task 1
Step5: Visualize
Step6: Task 2
Step7: loc[<row selection>, '<column>'] allows assignment with =
Step8: Task 3
Step9: Task 3 - Stretch goal
Step10: To get each country as a series, we create a 2-level index
Step11: The data table now indexes by country and year.
Step12: Visualization code is almost the same, but we tell Pandas how to unstack the data into multiple series
|
5,417
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import holoviews as hv
from IPython.display import HTML
hv.notebook_extension()
xs = range(10)
ys = np.exp(xs)
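# Wrap the samples in a Table element, declaring x as the key dimension and y as the value dimension.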
table = hv.Table((xs, ys), kdims=['x'], vdims=['y'])
table
hv.Scatter(table) + hv.Curve(table) + hv.Bars(table)
print(repr(hv.Scatter({'x': xs, 'y': ys}) +
hv.Scatter(np.column_stack([xs, ys])) +
hv.Scatter(pd.DataFrame({'x': xs, 'y': ys}))))
print(repr(hv.Scatter(ys) + hv.Scatter((xs, ys)) + hv.Scatter(zip(xs, ys))))
df = pd.DataFrame({'x': xs, 'y': ys, 'z': ys*2})
print(type(hv.Scatter(df).data))
hv.Dataset.datatype
print(type(hv.Scatter((xs, ys), datatype=['array']).data))
print(type(hv.Scatter((xs, ys), datatype=['dictionary']).data))
print(type(hv.Scatter((xs, ys), datatype=['dataframe']).data))
overlay = hv.Scatter(df, kdims='x', vdims='y') * hv.Scatter(df, kdims='x', vdims='z')
overlay
overlay.Scatter.I.data is overlay.Scatter.II.data
table.array()
HTML(table.dframe().head().to_html())
table.columns()
xs = np.arange(10)
curve = hv.Curve(zip(xs, np.exp(xs)))
curve * hv.Scatter(zip(xs, curve)) + curve.table()
HTML(curve.dframe().to_html())
%%opts Points (s=200) [size_index=None]
extents = (-1.6,-2.7,2.0,3)
np.random.seed(42)
mat = np.random.rand(3, 3)
img = hv.Image(mat, bounds=extents)
raster = hv.Raster(mat)
img * hv.Points(img) + img.table() + \
raster * hv.Points(raster) + raster.table()
obs_hmap = hv.HoloMap({i: hv.Image(np.random.randn(10, 10), bounds=(0,0,3,3))
for i in range(3)}, key_dimensions=['Observation'])
obs_hmap
%%opts Layout [fig_size=150] Scatter3D [color_index=3 size_index=None] (cmap='hot' edgecolor='k' s=50)
obs_hmap.table().to.scatter3d() + obs_hmap.table()
from itertools import product
extents = (0,0,3,3)
error_hmap = hv.HoloMap({(i, j): hv.Image(j*np.random.randn(3, 3), bounds=extents)
for i, j in product(range(3), np.linspace(0, 1, 3))},
key_dimensions=['Observation', 'noise'])
noise_layout = error_hmap.layout('noise')
noise_layout
%%opts Table [fig_size=150]
noise_layout.table()
bars = hv.Bars((['C', 'A', 'B', 'D'], [2, 7, 3, 4]))
bars + bars.sort() + bars.sort(['y'])
n = np.arange(1000)
xs = np.repeat(range(2), 500)
ys = n%4
zs = np.random.randn(1000)
table = hv.Table((xs, ys, zs), kdims=['x', 'y'], vdims=['z'])
table
%%opts BoxWhisker [aspect=2 fig_size=200 bgcolor='w']
hv.BoxWhisker(table)
%%opts Bars [show_legend=False] {+axiswise}
hv.Bars(table).aggregate(function=np.mean) + hv.Bars(table).reduce(x=np.mean)
hmap = hv.HoloMap({i: hv.Curve(np.arange(10)*i) for i in range(10)})
collapsed = hmap.collapse(function=np.mean, spreadfn=np.std)
hv.Spread(collapsed) * hv.Curve(collapsed) + collapsed.table()
macro_df = pd.read_csv('http://assets.holoviews.org/macro.csv', '\t')
dimensions = {'unem': 'Unemployment',
'capmob': 'Capital Mobility',
'gdp': 'GDP Growth',
'trade': 'Trade',
'year': 'Year',
'country': 'Country'}
macro_df = macro_df.rename(columns=dimensions)
%output dpi=100
options = hv.Store.options()
opts = hv.Options('plot', aspect=2, fig_size=250, show_frame=False, show_grid=True, legend_position='right')
options.NdOverlay = opts
options.Overlay = opts
macro = hv.Table(macro_df, kdims=['Year', 'Country'])
%%opts Table [aspect=1.5 fig_size=300]
macro = macro.sort()
macro[1988]
%%opts Curve (color=Palette('Set3'))
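# Convert the table to a HoloMap of GDP Growth curves keyed by Country, then overlay them into a single plot.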
gdp_curves = macro.to.curve('Year', 'GDP Growth')
gdp_curves.overlay('Country')
%%opts Overlay [bgcolor='w' legend_position='top_right'] Curve (color='k' linewidth=1) Spread (facecolor='gray' alpha=0.2)
hv.Spread(gdp_curves.collapse('Country', np.mean, np.std), label='std') *\
hv.Overlay([gdp_curves.collapse('Country', fn).relabel(name)(style=dict(linestyle=ls))
for name, fn, ls in [('max', np.max, '--'), ('mean', np.mean, '-'), ('min', np.min, '--')]])
%opts Bars [bgcolor='w' aspect=3 figure_size=450 show_frame=False]
%%opts Bars [category_index=2 stack_index=0 group_index=1 legend_position='top' legend_cols=7 color_by=['stack']] (color=Palette('Dark2'))
macro.to.bars(['Country', 'Year'], 'Trade', [])
%%opts Bars [padding=0.02 color_by=['group']] (alpha=0.6, color=Palette('Set1', reverse=True)[0.:.2])
countries = {'Belgium', 'Netherlands', 'Sweden', 'Norway'}
macro.to.bars(['Country', 'Year'], 'Unemployment').select(Year=(1978, 1985), Country=countries)
%opts HeatMap [show_values=False xticks=40 xrotation=90 aspect=1.2 invert_yaxis=True colorbar=True]
%opts Layout [figure_size=120 aspect_weight=0.5 hspace=0.8 vspace=0]
hv.Layout([macro.to.heatmap(['Year', 'Country'], value)
for value in macro.data.columns[2:]]).cols(2)
%%opts Scatter [scaling_method='width' scaling_factor=2] (color=Palette('Set3') edgecolors='k')
gdp_unem_scatter = macro.to.scatter('Year', ['GDP Growth', 'Unemployment'])
gdp_unem_scatter.overlay('Country')
%%opts NdOverlay [legend_cols=2] Scatter [size_index=1] (color=Palette('Blues'))
macro.to.scatter('GDP Growth', 'Unemployment', ['Year']).overlay()
%%opts Curve (color='k') Scatter [color_index=2 size_index=2 scaling_factor=1.4] (cmap='Blues' edgecolors='k')
macro_overlay = gdp_curves * gdp_unem_scatter
annotations = hv.Arrow(1973, 8, 'Oil Crisis', 'v') * hv.Arrow(1975, 6, 'Stagflation', 'v') *\
hv.Arrow(1979, 8, 'Energy Crisis', 'v') * hv.Arrow(1981.9, 5, 'Early Eighties\n Recession', 'v')
macro_overlay * annotations
%opts Overlay [aspect=1]
%%opts NdLayout [figure_size=100] Scatter [color_index=2] (cmap='Reds')
countries = {'United States', 'Canada', 'United Kingdom'}
(gdp_curves * gdp_unem_scatter).select(Country=countries).layout('Country')
%%opts Layout [fig_size=100] Scatter [color_index=2] (cmap='Reds')
(macro_overlay.relabel('GDP Growth', depth=1) +\
macro.to.curve('Year', 'Unemployment', ['Country'], group='Unemployment',) +\
macro.to.curve('Year', 'Trade', ['Country'], group='Trade') +\
macro.to.scatter('GDP Growth', 'Unemployment', ['Country'])).cols(2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple Dataset
Step2: However, this data has many more meaningful visual representations, and therefore the first important concept is that Dataset objects are interchangeable as long as their dimensionality allows it, meaning that you can easily create the different objects from the same data (and cast between the objects once created)
Step3: Each of these three plots uses the same data, but represents a different assumption about the semantic meaning of that data -- the Scatter plot is appropriate if that data consists of independent samples, the Curve plot is appropriate for samples chosen from an underlying smooth function, and the Bars plot is appropriate for independent categories of data. Since all these plots have the same dimensionality, they can easily be converted to each other, but there is normally only one of these representations that is semantically appropriate for the underlying data. For this particular data, the semantically appropriate choice is Curve, since the y values are samples from the continuous function exp.
Step4: Literals
Step5: For these inputs, the data will need to be copied to a new data structure, having one of the three storage formats above. By default Dataset will try to construct a simple array, falling back to either pandas dataframes (if available) or the dictionary-based format if the data is not purely numeric. Additionally, the interfaces will try to maintain the provided data's type, so numpy arrays and pandas DataFrames will therefore always be parsed by the array and dataframe interfaces first respectively.
Step6: Dataset will attempt to parse the supplied data, falling back to each consecutive interface if the previous could not interpret the data. The default list of fallbacks and simultaneously the list of allowed datatypes is
Step7: To select a particular storage format explicitly, supply one or more allowed datatypes
Step8: Sharing Data
Step9: We can quickly confirm that the data is actually shared
Step10: For columnar data, this approach is much more efficient than creating copies of the data for each Element, and allows for some advanced features like linked brushing in the Bokeh backend.
Step11: Pandas DataFrame
Step12: Dataset dictionary
Step13: Creating tabular data from Elements using the .table and .dframe methods
Step14: Similarly, we can get a pandas dataframe of the Curve using curve.dframe(). Here we wrap that call as raw HTML to allow automated testing of this notebook, but just calling curve.dframe() would give the same result visually
Step15: Although 2D image-like objects are not inherently well suited to a flat columnar representation, serializing them by converting to tabular data is a good way to reveal the differences between Image and Raster elements. Rasters are a very simple type of element, using array-like integer indexing of rows and columns from their top-left corner as in computer graphics applications. Conversely, Image elements are a higher-level abstraction that provides a general-purpose continuous Cartesian coordinate system, with x and y increasing to the right and upwards as in mathematical applications, and each point interpreted as a sample representing the pixel in which it is located (and thus centered within that pixel). Given the same data, the .table() representation will show how the data is being interpreted (and accessed) differently in the two cases (as explained in detail in the Continuous Coordinates Tutorial)
Step16: Tabularizing space containers
Step17: Now we can serialize this data just as before, where this time we get a four-column (4D) table. The key dimensions of both the HoloMap and the Images, as well as the z-values of each Image, are all merged into a single table. We can visualize the samples we have collected by converting it to a Scatter3D object.
Step18: Here the z dimension is shown by color, as in the original images, and the other three dimensions determine where the datapoint is shown in 3D. This way of deconstructing will work for any data structure that satisfies the conditions described above, no matter how nested. If we vary the amount of noise while continuing to performing multiple observations, we can create an NdLayout of HoloMaps, one for each level of noise, and animated by the observation number.
Step19: And again, we can easily convert the object to a Table
Step20: Applying operations to the data
Step21: Working with categorical or grouped data
Step22: Since there are repeat observations of the same x- and y-values, we have to reduce the data before we display it or else use a datatype that supports plotting distributions in this way. The BoxWhisker type allows doing exactly that
Step23: Aggregating/Reducing dimensions
Step24: (A) aggregates over both the x and y dimension, computing the mean for each x/y group, while (B) reduces the x dimension leaving just the mean for each group along y.
Step25: Working with complex data
Step26: We'll also take this opportunity to set default options for all the following plots.
Step27: Loading the data
Step28: To get an overview of the data we'll quickly sort it and then view the data for one year.
Step29: Most of the examples above focus on converting a Table to simple Element types, but HoloViews also provides powerful container objects to explore high-dimensional data, such as HoloMap, NdOverlay, NdLayout, and GridSpace. HoloMaps work as a useful interchange format from which you can conveniently convert to the other container types using its .overlay(), .layout(), and .grid() methods. This way we can easily create an overlay of GDP Growth curves by year for each country. Here Year is a key dimension and GDP Growth a value dimension. We are then left with the Country dimension, which we can overlay using the .overlay() method.
Step30: Now that we've extracted the gdp_curves, we can apply some operations to them. As in the simpler example above we will collapse the HoloMap of Curves using a number of functions to visualize the distribution of GDP Growth rates over time. First we find the mean curve with np.std as the spreadfn and cast the result to a Spread type, then we compute the min, mean and max curve in the same way and put them all inside an Overlay.
Step31: Many HoloViews Element types support multiple kdims, including HeatMap, Points, Scatter, Scatter3D, and Bars. Bars in particular allows you to lay out your data in groups, categories and stacks. By supplying the index of that dimension as a plotting option you can choose to lay out your data as groups of bars, categories in each group, and stacks. Here we choose to lay out the trade surplus of each country with groups for each year, no categories, and stacked by country. Finally, we choose to color the Bars for each item in the stack.
Step32: This plot contains a lot of data, and so it's probably a good idea to focus on specific aspects of it, telling a simpler story about them. For instance, using the .select method we can then customize the palettes (e.g. to use consistent colors per country across multiple analyses).
Step33: Many HoloViews Elements support multiple key and value dimensions. A HeatMap is indexed by two kdims, so we can visualize each of the economic indicators by year and country in a Layout. Layouts are useful for heterogeneous data you want to lay out next to each other.
Step34: Another way of combining heterogeneous data dimensions is to map them to a multi-dimensional plot type. Scatter Elements, for example, support multiple vdims, which may be mapped onto the color and size of the drawn points in addition to the y-axis position.
Step35: In this way we can plot any dimension against any other dimension, very easily allowing us to iterate through different ways of revealing relationships in the dataset.
Step36: This view, for example, immediately highlights the high unemployment rates of the 1980s.
Step37: Since we didn't map the country to some other container type, we get a widget allowing us to view the plot separately for each country, reducing the forest of curves we encountered before to manageable chunks.
Step38: Finally, let's combine some plots for each country into a Layout, giving us a quick overview of each economic indicator for each country
|
5,418
|
<ASSISTANT_TASK:>
Python Code:
x = 5**3
print(x)
import math
# Calculate square root of 25
x = math.sqrt(25)
print (x)
# Calculate cube root of 64
cr = round(64 ** (1. / 3))
print(cr)
import math
print (9**0.5)
print (math.sqrt(9))
import math
x = math.log(16, 4)
print(x)
import math
# Natural log of 29
print (math.log(29))
# Common log of 100
print(math.log10(100))
import pandas as pd
# Create a dataframe with an x column containing values from -10 to 10
df = pd.DataFrame ({'x': range(-10, 11)})
# Add a y column by applying the cubic equation y = 3*x**3 to x
df['y'] = 3*df['x']**3
#Display the dataframe
print(df)
# Plot the line
%matplotlib inline
from matplotlib import pyplot as plt
plt.plot(df.x, df.y, color="magenta")
plt.xlabel('x')
plt.ylabel('y')
plt.grid()
plt.axhline()
plt.axvline()
plt.show()
import pandas as pd
# Create a dataframe with an x column containing values from -10 to 10
df = pd.DataFrame ({'x': range(-10, 11)})
# Add a y column by applying the exponential equation y = 2**x to x
df['y'] = 2.0**df['x']
#Display the dataframe
print(df)
# Plot the line
%matplotlib inline
from matplotlib import pyplot as plt
plt.plot(df.x, df.y, color="magenta")
plt.xlabel('x')
plt.ylabel('y')
plt.grid()
plt.axhline()
plt.axvline()
plt.show()
import pandas as pd
# Create a dataframe with 20 years
df = pd.DataFrame ({'Year': range(1, 21)})
# Calculate the balance for each year based on the exponential growth from interest
df['Balance'] = 100 * (1.05**df['Year'])
#Display the dataframe
print(df)
# Plot the line
%matplotlib inline
from matplotlib import pyplot as plt
plt.plot(df.Year, df.Balance, color="green")
plt.xlabel('Year')
plt.ylabel('Balance')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multiplying a number by itself twice or three times to calculate the square or cube of a number is a common operation, but you can raise a number by any exponential power. For example, the following notation shows 4 to the power of 7 (or 4 x 4 x 4 x 4 x 4 x 4 x 4), which has the value
Step2: The code used in Python to calculate roots other than the square root reveals something about the relationship between roots and exponentials. The exponential root of a number is the same as that number raised to the power of 1 divided by the exponential. For example, consider the following statement
Step3: Logarithms
Step4: The final thing you need to know about exponentials and logarithms is that there are some special logarithms
Step5: Solving Equations with Exponentials
Step6: Note that the line is curved. This is symptomatic of an exponential equation
Step7: Note that when the exponent is a negative number, Python reports the result as 0. Actually, it's a very small fractional number, but because the base is positive the exponential value will always be positive. Also, note the rate at which y increases as x increases - exponential growth can be pretty dramatic.
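A short self-contained recap of the points in Steps 2 and 4 above (roots as fractional powers, and logarithms as the inverse of exponentiation); only the standard library is used here.
import math
x = 64
print(x ** (1. / 3))            # cube root of 64 as a fractional power -> ~4.0
print(round(x ** (1. / 3)))     # rounded to the exact integer root
y = math.log(100, 10)           # common (base-10) log of 100 -> 2.0
print(10 ** y)                  # raising the base to the log recovers 100
print(math.log(math.e ** 3))    # the natural log inverts e**x -> 3.0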
|
5,419
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import theano
from theano import tensor
from blocks import initialization  # needed below for the weights_init arguments
from blocks.bricks import Identity, Linear, Tanh, MLP, Softmax
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import SimpleRecurrent, Bidirectional, BaseRecurrent, recurrent
from blocks.bricks.parallel import Merge
#from blocks.bricks.parallel import Fork
from blocks.bricks.cost import CategoricalCrossEntropy
from blocks.initialization import IsotropicGaussian, Constant
from blocks.graph import ComputationGraph
from blocks.filter import VariableFilter
from blocks.roles import INPUT, WEIGHT, OUTPUT
vocab_size=3
embedding_dim=3
labels_size=10
lookup = LookupTable(vocab_size, embedding_dim)
encoder = Bidirectional(SimpleRecurrent(dim=embedding_dim, activation=Tanh()))
mlp = MLP([Softmax()], [embedding_dim * 2, labels_size],  # the Bidirectional encoder outputs 2*embedding_dim features
weights_init=IsotropicGaussian(0.01),
biases_init=Constant(0))
#encoder.prototype.apply.sequences
#dir(encoder.prototype.apply.sequences)
#combine = Merge(input_dims=dict(), output_dim=labels_size)
#labelled = Softmax( encoder )
x = tensor.imatrix('features')
y = tensor.imatrix('targets')
probs = mlp.apply(encoder.apply(lookup.apply(x)))
cost = CategoricalCrossEntropy().apply(y.flatten(), probs)
#cg = ComputationGraph([cost])
cg = ComputationGraph([probs])
cg.variables
#VariableFilter(roles=[OUTPUT])(cg.variables)
#dir(cg.outputs)
#np.shape(cg.outputs)
#mlp = MLP([Softmax()], [embedding_dim*2, labels_size],
# weights_init=IsotropicGaussian(0.01),
# biases_init=Constant(0))
#mlp.initialize()
#fork = Fork([name for name in encoder.prototype.apply.sequences if name != 'mask'])
#fork.input_dim = dimension
#fork.output_dims = [dimension for name in fork.input_names]
#print(fork.output_dims)
#readout = Readout(
# readout_dim=labels_size,
# source_names=[encodetransition.apply.states[0], attention.take_glimpses.outputs[0]],
# emitter=SoftmaxEmitter(name="emitter"),
# #feedback_brick=LookupFeedback(alphabet_size, dimension),
# name="readout")
# NOTE: `rnn` and a float input tensor are not defined in this excerpt; a minimal
# SimpleRecurrent (as in the Blocks tutorial) is assumed here so that the cell runs.
x = tensor.tensor3('x')
rnn = SimpleRecurrent(dim=3, activation=Identity(),
                      weights_init=initialization.Identity())
rnn.initialize()
h0 = tensor.matrix('h0')
h = rnn.apply(inputs=x, states=h0)
f = theano.function([x, h0], h)
print(f(np.ones((3, 1, 3), dtype=theano.config.floatX),
np.ones((1, 3), dtype=theano.config.floatX)))
class FeedbackRNN(BaseRecurrent):
def __init__(self, dim, **kwargs):
super(FeedbackRNN, self).__init__(**kwargs)
self.dim = dim
self.first_recurrent_layer = SimpleRecurrent(
dim=self.dim, activation=Identity(), name='first_recurrent_layer',
weights_init=initialization.Identity())
self.second_recurrent_layer = SimpleRecurrent(
dim=self.dim, activation=Identity(), name='second_recurrent_layer',
weights_init=initialization.Identity())
self.children = [self.first_recurrent_layer,
self.second_recurrent_layer]
@recurrent(sequences=['inputs'], contexts=[],
states=['first_states', 'second_states'],
outputs=['first_states', 'second_states'])
def apply(self, inputs, first_states=None, second_states=None):
first_h = self.first_recurrent_layer.apply(
inputs=inputs, states=first_states + second_states, iterate=False)
second_h = self.second_recurrent_layer.apply(
inputs=first_h, states=second_states, iterate=False)
return first_h, second_h
def get_dim(self, name):
return (self.dim if name in ('inputs', 'first_states', 'second_states')
else super(FeedbackRNN, self).get_dim(name))
x = tensor.tensor3('x')
feedback = FeedbackRNN(dim=3)
feedback.initialize()
first_h, second_h = feedback.apply(inputs=x)
f = theano.function([x], [first_h, second_h])
for states in f(np.ones((3, 1, 3), dtype=theano.config.floatX)):
print(states)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now the output layer needs to gather the two hidden layers (one from each direction)
Step2: Note that in order to double the input we had to apply a bricks.Linear brick to x, even though $h_t=f(Vh_{t-1}+Wx_t+b)$ is what is usually thought of as the RNN equation. The reason why recurrent bricks work that way is that it allows greater flexibility and modularity
Step3: Iterate (or not)
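A minimal sketch of the Linear-plus-SimpleRecurrent pattern mentioned in Step2. The dimensions, names and initializations below are illustrative assumptions that do not come from this excerpt; they follow the usual Blocks tutorial style.
import numpy as np
import theano
from theano import tensor
from blocks import initialization
from blocks.bricks import Identity, Linear
from blocks.bricks.recurrent import SimpleRecurrent

x = tensor.tensor3('x')
x_to_h = Linear(name='x_to_h', input_dim=3, output_dim=3,
                weights_init=initialization.Identity(),
                biases_init=initialization.Constant(0))
rnn = SimpleRecurrent(dim=3, activation=Identity(),
                      weights_init=initialization.Identity())
x_to_h.initialize()
rnn.initialize()
# W x_t is handled by the Linear brick, V h_{t-1} by the recurrent brick
h = rnn.apply(x_to_h.apply(x))
f = theano.function([x], h)
print(f(np.ones((3, 1, 3), dtype=theano.config.floatX)))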
|
5,420
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
data_dir = "./Data/Weather/"
!curl -o $data_dir/STAT.pickle http://mas-dse-open.s3.amazonaws.com/Weather/STAT.pickle
import pickle
STAT,STAT_description=pickle.load(open(data_dir+'/STAT.pickle','r'))
STAT.keys()
STAT_description
Scalars=['mean','std','low1000','low100','high100','high1000']
for meas in STAT.keys():
!grep $meas './Data/Weather/ghcnd-readme.txt'
S=STAT[meas]
for scalar in Scalars:
print '%s:%f'%(scalar,S[scalar]),
print
def YearlyPlots(T,ttl='',yl='',xl='',y=None,x=None,size=(10,7), c=None):
yearday=[i for i in range(1,366)]
fig=figure(1,figsize=size,dpi=300)
if shape(T)[0] != 365:
raise ValueError("First dimension of T should be 365. Shape(T)="+str(shape(T)))
if c is not None:
plot_date(yearday,T, '-',color=c);
else:
plot_date(yearday,T, '-', );
# rotate and align the tick labels so they look better
#fig.autofmt_xdate()
plt.gca().xaxis.set_major_formatter( DateFormatter('%b') )
ylabel(yl)
xlabel(xl)
if y is not None:
ylim(y)
if x is not None:
xlim(x)
grid()
title(ttl)
figure(figsize=(15,30))
offset=1
for meas in STAT.keys():
subplot(6,3,offset)
offset+=1
S=STAT[meas]
pyplot.hist(S['SortedVals'],bins=np.arange((S['low100']), (S['high100']), 5))
subplot(6,3,offset)
offset+=1
## Your code for mean and mean +- std
std = sqrt(STAT[meas]["Var"])
mean_y = STAT[meas]['Mean']
std_plus_y = mean_y + std
std_minus_y = mean_y - std
YearlyPlots(mean_y,ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='r')
YearlyPlots(std_plus_y,ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='b')
YearlyPlots(std_minus_y,ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='b')
subplot(6,3,offset)
offset+=1
YearlyPlots(STAT[meas]['NE'], ttl="counts")
YearlyPlots(STAT["TMAX"]['NE'], ttl="counts", c='black')
figure(figsize=(15,30))
offset=1
for meas in STAT.keys():
subplot(6,3,offset)
offset+=1
## Your code for percentage of variance explained
S=STAT[meas]
tvar = S['eigval'].sum()
explained_variance_ratio_ = S['eigval']/tvar
pyplot.plot(np.cumsum(explained_variance_ratio_[:10]))
subplot(6,3,offset)
offset+=1
## Your code for mean and mean +- std
std = sqrt(STAT[meas]["Var"])
mean_y = STAT[meas]['Mean']
std_plus_y = mean_y + std
std_minus_y = mean_y - std
YearlyPlots(mean_y,ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='r')
YearlyPlots(std_plus_y,ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='b')
YearlyPlots(std_minus_y,ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='b')
subplot(6,3,offset)
offset+=1
## Your code for top-3 eigenvectors
YearlyPlots(S['eigvec'][:,0], ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='r')
YearlyPlots(S['eigvec'][:,1], ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='g')
YearlyPlots(S['eigvec'][:,2], ttl=meas+' mean +-std',yl='',xl='',y=None,x=None,size=(10,7), c='b')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading Pickled data from S3
Step2: Get the statistics from the Pickle File
Step3: Script for plotting yearly plots
Step4: Plot the following 3 plots for each measurement
Step5: Plot the Number of measurements recorded each day for TMAX
Step6: Extra Credit
|
5,421
|
<ASSISTANT_TASK:>
Python Code:
from nupic.engine import Network, Dimensions
# Create Network instance
network = Network()
# Add three TestNode regions to network
network.addRegion("region1", "TestNode", "")
network.addRegion("region2", "TestNode", "")
network.addRegion("region3", "TestNode", "")
# Set dimensions on first region
region1 = network.getRegions().getByName("region1")
region1.setDimensions(Dimensions([1, 1]))
# Link regions
network.link("region1", "region2", "UniformLink", "")
network.link("region2", "region1", "UniformLink", "")
network.link("region1", "region3", "UniformLink", "")
network.link("region2", "region3", "UniformLink", "")
# Initialize network
network.initialize()
from nupic.frameworks.viz import NetworkVisualizer
# Initialize Network Visualizer
viz = NetworkVisualizer(network)
# Render to dot (stdout)
viz.render()
from nupic.frameworks.viz import DotRenderer
from io import StringIO
outp = StringIO()
viz.render(renderer=lambda: DotRenderer(outp))
# Render dot to image
from graphviz import Source
from IPython.display import Image
Image(Source(outp.getvalue()).pipe("png"))
from nupic.frameworks.opf.modelfactory import ModelFactory
# Note: parameters copied from examples/opf/clients/hotgym/simple/model_params.py
model = ModelFactory.create({'aggregationInfo': {'hours': 1, 'microseconds': 0, 'seconds': 0, 'fields': [('consumption', 'sum')], 'weeks': 0, 'months': 0, 'minutes': 0, 'days': 0, 'milliseconds': 0, 'years': 0}, 'model': 'CLA', 'version': 1, 'predictAheadTime': None, 'modelParams': {'sensorParams': {'verbosity': 0, 'encoders': {'timestamp_timeOfDay': {'type': 'DateEncoder', 'timeOfDay': (21, 1), 'fieldname': u'timestamp', 'name': u'timestamp_timeOfDay'}, u'consumption': {'resolution': 0.88, 'seed': 1, 'fieldname': u'consumption', 'name': u'consumption', 'type': 'RandomDistributedScalarEncoder'}, 'timestamp_weekend': {'type': 'DateEncoder', 'fieldname': u'timestamp', 'name': u'timestamp_weekend', 'weekend': 21}}, 'sensorAutoReset': None}, 'spParams': {'columnCount': 2048, 'spVerbosity': 0, 'spatialImp': 'cpp', 'synPermConnected': 0.1, 'seed': 1956, 'numActiveColumnsPerInhArea': 40, 'globalInhibition': 1, 'inputWidth': 0, 'synPermInactiveDec': 0.005, 'synPermActiveInc': 0.04, 'potentialPct': 0.85, 'boostStrength': 3.0}, 'spEnable': True, 'clParams': {'implementation': 'cpp', 'alpha': 0.1, 'verbosity': 0, 'steps': '1,5', 'regionName': 'SDRClassifierRegion'}, 'inferenceType': 'TemporalMultiStep', 'tpEnable': True, 'tpParams': {'columnCount': 2048, 'activationThreshold': 16, 'pamLength': 1, 'cellsPerColumn': 32, 'permanenceInc': 0.1, 'minThreshold': 12, 'verbosity': 0, 'maxSynapsesPerSegment': 32, 'outputType': 'normal', 'initialPerm': 0.21, 'globalDecay': 0.0, 'maxAge': 0, 'permanenceDec': 0.1, 'seed': 1960, 'newSynapseCount': 20, 'maxSegmentsPerCell': 128, 'temporalImp': 'cpp', 'inputWidth': 2048}, 'trainSPNetOnlyIfRequested': False}})
# New network, new NetworkVisualizer instance
viz = NetworkVisualizer(model._netInfo.net)
# Render to Dot output to buffer
outp = StringIO()
viz.render(renderer=lambda: DotRenderer(outp))
# Render Dot to image, display inline
Image(Source(outp.getvalue()).pipe("png"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Render with nupic.frameworks.viz.NetworkVisualizer, which takes as input any nupic.engine.Network instance
Step2: That's interesting, but not necessarily useful if you don't understand dot. Let's capture that output and do something else
Step3: outp now contains the rendered output, render to an image with graphviz
Step4: In the example above, each three-columned rectangle is a discrete region, the user-defined name for which is in the middle column. The left-hand and right-hand columns are respective inputs and outputs, the names for which, e.g. "bottomUpIn" and "bottomUpOut", are specific to the region type. The arrows indicate links between outputs from one region to the input of another.
Step5: Same deal as before, create a NetworkVisualizer instance, render to a buffer, then to an image, and finally display it inline.
|
5,422
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by the cells below (they presumably lived in an earlier cell of the original notebook)
import numpy as np
import matplotlib.pyplot as plt
from pymatgen import Element, Composition, MPRester
# Print periodic table to orient ourselves
Element.print_periodic_table()
# Generate list of non-radioactive elements (noble gases omitted)
def desired_element(elem):
    omit = ['Po', 'At', 'Rn', 'Fr', 'Ra']
    return not elem.is_noble_gas and not elem.is_actinoid and elem.symbol not in omit
element_universe = [e for e in Element if desired_element(e)]
omitted_elements = [e for e in Element if e not in element_universe]
print("Number of included elements =", len(element_universe))
print("Omitted elements:", " ".join(sorted([e.symbol for e in omitted_elements])))
# How many crystal structures for elements exist?
with MPRester() as m:
elements = m.query(criteria = {"nelements": 1}, properties = ['icsd_ids', 'pretty_formula'])
# Basic analysis
print("#(Materials Project records) =", len(elements))
print("#(ICSD records) =", sum([len(c['icsd_ids']) for c in elements]))
# How are ICSD entries grouped into Materials Project entries?
entry_multiplicities = [len(e['icsd_ids']) for e in elements]
plt.hist(entry_multiplicities, bins = max(entry_multiplicities))
plt.xlabel('Multiplicity')
plt.ylabel('Number of occurences')
plt.title('Multiplicities of ICSD entries')
# Allotropes
from collections import defaultdict
element_multiplicities = [e['pretty_formula'] for e in elements]
allotropes = defaultdict(int, [(e, element_multiplicities.count(e)) for e in set(element_multiplicities)])
elements_sorted = [e.symbol for e in sorted(Element, key = lambda elem: elem.Z)]
xx = range(len(elements_sorted))
yy = [allotropes[elem] for elem in elements_sorted]
plt.bar(xx, yy)
plt.xlabel('Atomic number')
plt.ylabel('Allotropes')
# Omitted elements because their elemental form is molecular
omitted_allotropes = [e for e in allotropes.keys() if e not in elements_sorted]
for k in omitted_allotropes:
print(k, allotropes[k])
# Query all ternaries
with MPRester() as m:
ternaries1 = m.query(criteria = {"nelements": 3}, properties = ['icsd_ids', 'pretty_formula'])
# Basic analysis
print("#(Materials Project records) =", len(ternaries1))
print("#(ICSD records) =", sum([len(c['icsd_ids']) for c in ternaries1]))
print("#(MP with ICSD records) =", len([c for c in ternaries1 if len(c['icsd_ids']) > 0]))
print("#(Unique ternaries) =", len(set([c['pretty_formula'] for c in ternaries1 if len(c['icsd_ids']) > 0])))
# Alternate way of querying ternaries
with MPRester() as m:
ternaries2 = m.query(criteria = {"nelements": 3}, properties = ['icsd_id', 'pretty_formula'])
print("#(Materials Project records) =", len(ternaries2))
print("#(MP with ICSD records) =", len([c for c in ternaries2 if c['icsd_id'] is not None]))
print("#(Unique ternaries) =", len(set([c['pretty_formula'] for c in ternaries2 if c['icsd_id']])))
# Number of unique compositions in both querying methods
uniq_ternaries1 = set([c['pretty_formula'] for c in ternaries1])
uniq_ternaries2 = set([c['pretty_formula'] for c in ternaries2])
print("#(Unique ternaries, method 1) = ", len(uniq_ternaries1))
print("#(Unique ternaries, method 2) = ", len(uniq_ternaries2))
print("Are the sets equal?", uniq_ternaries2 == uniq_ternaries1)
icsd_ternaries1 = set([c['pretty_formula'] for c in ternaries1 if len(c['icsd_ids']) > 0])
icsd_ternaries2 = set([c['pretty_formula'] for c in ternaries2 if c['icsd_id']])
print("|T2-T1| = ", len(icsd_ternaries2 - icsd_ternaries1))
print("|T1-T2| = ", len(icsd_ternaries1 - icsd_ternaries2))
pretty_formula = (icsd_ternaries2 - icsd_ternaries1).pop()
print("Example compound in |T2 - T1| =", pretty_formula)
print([c for c in ternaries1 if c['pretty_formula'] == pretty_formula])
print([c for c in ternaries2 if c['pretty_formula'] == pretty_formula])
pretty_formula = (icsd_ternaries1 - icsd_ternaries2).pop()
print("Example compound in |T1 - T2| =", pretty_formula)
print([c for c in ternaries1 if c['pretty_formula'] == pretty_formula])
print([c for c in ternaries2 if c['pretty_formula'] == pretty_formula])
# filter by elements that I care care about -- remove radioactive elements
all_ternaries = list(icsd_ternaries1 | icsd_ternaries2)
omitted_Elements = [Element(e) for e in omitted_elements]
omitted_ternaries = [c for c in all_ternaries if any((e in omitted_Elements) for e in Composition(c))]
icsd_ternaries = [c for c in all_ternaries if c not in omitted_ternaries]
print("Number of omitted ternaries =", len(omitted_ternaries))
print("Examples:", omitted_ternaries[:5])
len(icsd_ternaries)
from collections import Counter
def composition_to_tuple(name):
return tuple(sorted([e.symbol for e in Composition(name)]))
def phasediag_distribution(compounds, N_universe):
counts = Counter([composition_to_tuple(c) for c in compounds])
hist = Counter(counts.values())
hist[0] = N_universe - len(counts) # add point corresponding to universe
return hist
from scipy.misc import comb
N_ternary_diagrams = int(comb(len(element_universe), 3)) # N choose 3 = number of ternary phase diagrams
hist = phasediag_distribution(icsd_ternaries, N_ternary_diagrams)
xx, yy = np.array(hist.items()).T
plt.semilogy(xx, yy, 'o-')
plt.xlim(-0.5, len(xx) - 0.5)
plt.xlabel("Number of ternaries in system")
plt.ylabel("N(Number of ternaries in system)")
plt.title("Distribution of all known ternaries")
def filter_one_element(symbol, universe):
return [c for c in universe if Element(symbol) in Composition(c)]
N_diagrams = int(comb(len(element_universe)-1, 2))
anions = ["O", "S", "Se", "F", "Cl", "Br", "I", "N", "P", "C"]
grouped = [filter_one_element(X, icsd_ternaries) for X in anions]
hists = [phasediag_distribution(compounds, N_diagrams) for compounds in grouped]
plt.figure(figsize = (8,5))
for i,hist in enumerate(hists):
plt.semilogy(hist.keys(), hist.values(), 'o-', label = anions[i],
color = plt.cm.viridis(i/(len(anions)-1)), alpha = 0.7)
plt.xlim(-0.5, 14.5)
plt.ylim(0.5, None)
plt.legend(loc = "best")
plt.xlabel("Number of ternaries in system")
plt.ylabel("N(Number of ternaries in system)")
plt.title("Distribution of known ionic ternary systems")
Element.print_periodic_table()
def filter_in_set(compound, universe):
return all((e in universe) for e in Composition(compound))
transition_metals = [e for e in Element if e.is_transition_metal]
tm_ternaries = [c for c in icsd_ternaries if filter_in_set(c, transition_metals)]
print("The Materials Project doesn't have intermetallics:", len(tm_ternaries))
electronegativities = np.array([sorted([e.X for e in Composition(name).elements], reverse = True)
for name in icsd_ternaries])
np.savetxt("ternary.electronegativities", electronegativities)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(
electronegativities[:,2],
electronegativities[:,1],
electronegativities[:,0]
)
ax.set_xlabel('X Most elecneg.')
ax.set_ylabel('Y')
ax.set_zlabel('Z least elecneg.')
plt.show()
# write data to file for Louis-Francois
with open("ternaries.dat", 'w') as f:
for formula in icsd_ternaries:
c = Composition(formula)
symbols = []
ratios = []
for k,v in c.iteritems():
symbols.append(k)
ratios.append(int(v))
line = "{:15} {:2} {:2} {:2} {:2} {:2} {:2}\n".format(formula, *(symbols + ratios))
f.write(line)
data = {}
for e in Element:
data[e.symbol] = e.data
with open("elements.dat", 'w') as f:
f.write(str(data))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How complete is the Materials Project database?
Step2: Ternaries
Step3: Why is there a discrepancy between the number of unique ternaries of the two querying methods?
Step4: Conclusion
Step5: Exploratory Analysis
Step6: Distribution by Anion
Step7: Intermetallics
Step8: Electronegativity
|
5,423
|
<ASSISTANT_TASK:>
Python Code:
import itertools
import string
import functools
letters = string.ascii_lowercase
vocab = list(map(''.join, itertools.product(letters, repeat=2)))
from random import choices
def zipf_pdf(k):
return 1/k**1.07
def exponential_pdf(k, base):
return base**k
def new_document(n_words, pdf):
return set(
choices(
vocab,
weights=map(pdf, range(1, 1+len(vocab))),
k=n_words
)
)
def new_documents(n_documents, n_words, pdf):
return [new_document(n_words, pdf) for _ in range(n_documents)]
def jaccard(a, b):
return len(a & b) / len(a | b)
def all_pairs(documents):
return list(itertools.combinations(documents, 2))
def filter_similar(pairs, cutoff=0.9):
return list(filter(
lambda docs: jaccard(docs[0], docs[1]) > cutoff,
pairs
))
documents = new_documents(1000, 1000, functools.partial(exponential_pdf, base=1.1))
pairs = all_pairs(documents)
len(filter_similar(pairs))
jaccards = list(map(lambda docs: jaccard(docs[0], docs[1]), pairs))
%matplotlib inline
import seaborn as sns
sns.distplot(jaccards)
def create_and_filter(n_documents):
documents = new_documents(n_documents, 500, functools.partial(exponential_pdf, base=1.1))
pairs = all_pairs(documents)
return filter_similar(pairs)
import timeit
def time_create_and_filter(n_documents):
return timeit.timeit(
'create_and_filter(n)',
globals={
"n": n_documents,
"create_and_filter": create_and_filter
},
number=1
)
import pandas as pd
from tqdm import tnrange, tqdm_notebook
def create_timing_df(ns):
return pd.DataFrame({
'n': ns,
'time': list(map(time_create_and_filter, tqdm_notebook(ns)))
})
df = create_timing_df([2 ** e for e in range(1, 13)])
sns.lmplot(x="n", y="time", data=df, order=2, )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Based on the way we are choosing words, we say that 410 pairs out of 1000 documents have a high enough jaccard to call them similar. This seems realistic enough. We can fiddle with this if we want, by changing the base
Step2: We can also see that the jaccards look normally distributed
Step3: Now let's time this, to see how it increases with the number of documents $n$. We expect it to be $\Theta(n^2)$, because each document is compared to every other document.
|
5,424
|
<ASSISTANT_TASK:>
Python Code:
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n' \
'You should consider updating to Python 3.4.0 or ' \
'higher as the libraries built for this course ' \
'have only been tested in Python 3.4 and higher.\n')
print('Try installing the Python 3.5 version of anaconda '
'and then restart `jupyter notebook`:\n' \
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
except ImportError:
print('You are missing some packages! ' \
'We will try installing them before continuing!')
!pip install "numpy>=1.11.0" "matplotlib>=1.5.1" "scikit-image>=0.11.3" "scikit-learn>=0.17" "scipy>=0.17.0"
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
print('Done!')
# Import Tensorflow
try:
import tensorflow as tf
except ImportError:
print("You do not have tensorflow installed!")
print("Follow the instructions on the following link")
print("to install tensorflow before continuing:")
print("")
print("https://github.com/pkmital/CADL#installation-preliminaries")
# This cell includes the provided libraries from the zip file
# and a library for displaying images from ipython, which
# we will use to display the gif
try:
from libs import utils, gif
import IPython.display as ipyd
except ImportError:
print("Make sure you have started notebook in the same directory" +
" as the provided zip file which includes the 'libs' folder" +
" and the file 'utils.py' inside of it. You will NOT be able"
" to complete this assignment unless you restart jupyter"
" notebook inside the directory created by extracting"
" the zip file or cloning the github repo.")
# We'll tell matplotlib to inline any drawn figures like so:
%matplotlib inline
plt.style.use('ggplot')
# Bit of formatting because I don't like the default inline code style:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
    padding: 2px 4px;
    color: #c7254e;
    background-color: #f9f2f4;
    border-radius: 4px;
} </style>""")
xs = np.linspace(-6, 6, 100)
plt.plot(xs, np.maximum(xs, 0), label='relu')
plt.plot(xs, 1 / (1 + np.exp(-xs)), label='sigmoid')
plt.plot(xs, np.tanh(xs), label='tanh')
plt.xlabel('Input')
plt.xlim([-6, 6])
plt.ylabel('Output')
plt.ylim([-1.5, 1.5])
plt.title('Common Activation Functions/Nonlinearities')
plt.legend(loc='lower right')
# Create a placeholder with None x 2 dimensions of dtype tf.float32, and name it "X":
X = tf.placeholder(tf.float32, shape=(None, 2), name="X")
W = tf.get_variable("W", (2,20), initializer=tf.random_normal_initializer())
h = tf.matmul(X, W)
b = tf.get_variable("b", 20, initializer=tf.constant_initializer(1.0))
h = tf.nn.bias_add(h, b)
h = tf.nn.relu(h)  # the bias was already added above, so only apply the nonlinearity here
h, W = utils.linear(
x=X, n_output=20, name='linear', activation=tf.nn.relu)
# First load an image
import matplotlib.pyplot as plt
#img = plt.imread("mypictures/2000px-Tux.svg.png")
img = plt.imread("mypictures/tux-small.jpg")
# Be careful with the size of your image.
# Try a fairly small image to begin with,
# then come back here and try larger sizes.
img = imresize(img, (100, 100))
plt.figure(figsize=(5, 5))
plt.imshow(img)
# Make sure you save this image as "reference.png"
# and include it in your zipped submission file
# so we can tell what image you are trying to paint!
plt.imsave(fname='reference.png', arr=img)
print(img.shape)
def split_image(img):
# We'll first collect all the positions in the image in our list, xs
xs = []
# And the corresponding colors for each of these positions
ys = []
# Now loop over the image
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
# And store the inputs
xs.append([row_i, col_i])
# And outputs that the network needs to learn to predict
ys.append(img[row_i, col_i])
# we'll convert our lists to arrays
xs = np.array(xs)
ys = np.array(ys)
return xs, ys
xs, ys = split_image(img)
# and print the shapes
xs.shape, ys.shape
# Normalize the input (xs) using its mean and standard deviation
xs = (xs - np.mean(xs)) / np.std(xs)
# Just to make sure you have normalized it correctly:
print(np.min(xs), np.max(xs))
assert(np.min(xs) > -3.0 and np.max(xs) < 3.0)
print(np.min(ys), np.max(ys))
ys = ys / 255.0
print(np.min(ys), np.max(ys))
plt.imshow(ys.reshape(img.shape))
# Let's reset the graph:
tf.reset_default_graph()
# Create a placeholder of None x 2 dimensions and dtype tf.float32
# This will be the input to the network which takes the row/col
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
# Create the placeholder, Y, with 3 output dimensions instead of 2.
# This will be the output of the network, the R, G, B values.
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')
# We'll create 6 hidden layers. Let's create a variable
# to say how many neurons we want for each of the layers
# (try 20 to begin with, then explore other values)
n_neurons = [2, 20,20,20,20,20,20, 3]
# Create the first linear + nonlinear layer which will
# take the 2 input neurons and fully connects it to 20 neurons.
# Use the `utils.linear` function to do this just like before,
# but also remember to give names for each layer, such as
# "1", "2", ... "5", or "layer1", "layer2", ... "layer6".
h1, W1 = utils.linear(X, 20, activation=tf.nn.relu, name='Lay1')
# Create another one:
h2, W2 = utils.linear(h1, 20, activation=tf.nn.relu, name='Lay2')
# and four more (or replace all of this with a loop if you can!):
h3, W3 = utils.linear(h2, 20, activation=tf.nn.relu, name='Lay3')
h4, W4 = utils.linear(h3, 20, activation=tf.nn.relu, name='Lay4')
h5, W5 = utils.linear(h4, 20, activation=tf.nn.relu, name='Lay5')
h6, W6 = utils.linear(h5, 20, activation=tf.nn.relu, name='Lay6')
# Now, make one last layer to make sure your network has 3 outputs:
Y_pred, W7 = utils.linear(h6, 3, activation=None, name='pred')
assert(X.get_shape().as_list() == [None, 2])
assert(Y_pred.get_shape().as_list() == [None, 3])
assert(Y.get_shape().as_list() == [None, 3])
error = np.linspace(0.0, 128.0**2, 100)
loss = error**2.0
plt.plot(error, loss)
plt.xlabel('error')
plt.ylabel('loss')
error = np.linspace(0.0, 1.0, 100)
plt.plot(error, error**2, label='l_2 loss')
plt.plot(error, np.abs(error), label='l_1 loss')
plt.xlabel('error')
plt.ylabel('loss')
plt.legend(loc='lower right')
# first compute the error, the inner part of the summation.
# This should be the l1-norm or l2-norm of the distance
# between each color channel.
error = tf.abs(Y - Y_pred)
assert(error.get_shape().as_list() == [None, 3])
# Now sum the error for each feature in Y.
# If Y is [Batch, Features], the sum should be [Batch]:
sum_error = tf.reduce_sum(error, 1)
assert(sum_error.get_shape().as_list() == [None])
# Finally, compute the cost, as the mean error of the batch.
# This should be a single value.
cost = tf.reduce_mean(sum_error)
assert(cost.get_shape().as_list() == [])
# Refer to the help for the function
optimizer =tf.train.AdamOptimizer(0.001).minimize(cost)
# Create parameters for the number of iterations to run for (< 100)
n_iterations = 250
# And how much data is in each minibatch (< 500)
batch_size = 200
# Then create a session
sess = tf.Session()
# Initialize all your variables and run the operation with your session
sess.run(tf.initialize_all_variables())
# Optimize over a few iterations, each time following the gradient
# a little at a time
imgs = []
costs = []
gif_step = n_iterations // 10
step_i = 0
for it_i in range(n_iterations):
# Get a random sampling of the dataset
idxs = np.random.permutation(range(len(xs)))
# The number of batches we have to iterate over
n_batches = len(idxs) // batch_size
# Now iterate over our stochastic minibatches:
for batch_i in range(n_batches):
# Get just minibatch amount of data
idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]
# And optimize, also returning the cost so we can monitor
# how our optimization is doing.
training_cost = sess.run(
[cost, optimizer],
feed_dict={X: xs[idxs_i], Y: ys[idxs_i]})[0]
# Also, every 20 iterations, we'll draw the prediction of our
# input xs, which should try to recreate our image!
if (it_i + 1) % gif_step == 0:
costs.append(training_cost / n_batches)
ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
img = np.clip(ys_pred.reshape(img.shape), 0, 1)
imgs.append(img)
# Plot the cost over time
fig, ax = plt.subplots(1, 2)
ax[0].plot(costs)
ax[0].set_xlabel('Iteration')
ax[0].set_ylabel('Cost')
ax[1].imshow(img)
fig.suptitle('Iteration {}'.format(it_i))
plt.show()
# Save the images as a GIF
_ = gif.build_gif(imgs, saveto='single.gif', show_gif=False)
ipyd.Image(url='single.gif?{}'.format(np.random.rand()),
height=500, width=500)
def build_model(xs, ys, n_neurons, n_layers, activation_fn,
final_activation_fn, cost_type):
xs = np.asarray(xs)
ys = np.asarray(ys)
if xs.ndim != 2:
raise ValueError(
'xs should be a n_observates x n_features, ' +
'or a 2-dimensional array.')
if ys.ndim != 2:
raise ValueError(
'ys should be a n_observates x n_features, ' +
'or a 2-dimensional array.')
n_xs = xs.shape[1]
n_ys = ys.shape[1]
X = tf.placeholder(name='X', shape=[None, n_xs],
dtype=tf.float32)
Y = tf.placeholder(name='Y', shape=[None, n_ys],
dtype=tf.float32)
current_input = X
for layer_i in range(n_layers):
current_input = utils.linear(
current_input, n_neurons,
activation=activation_fn,
name='layer{}'.format(layer_i))[0]
Y_pred = utils.linear(
current_input, n_ys,
activation=final_activation_fn,
name='pred')[0]
if cost_type == 'l1_norm':
cost = tf.reduce_mean(tf.reduce_sum(
tf.abs(Y - Y_pred), 1))
elif cost_type == 'l2_norm':
cost = tf.reduce_mean(tf.reduce_sum(
tf.squared_difference(Y, Y_pred), 1))
else:
raise ValueError(
'Unknown cost_type: {}. '.format(
cost_type) + 'Use only "l1_norm" or "l2_norm"')
return {'X': X, 'Y': Y, 'Y_pred': Y_pred, 'cost': cost}
def train(imgs,
learning_rate=0.0001,
batch_size=200,
n_iterations=10,
gif_step=2,
n_neurons=30,
n_layers=10,
activation_fn=tf.nn.relu,
final_activation_fn=tf.nn.tanh,
cost_type='l2_norm'):
N, H, W, C = imgs.shape
all_xs, all_ys = [], []
for img_i, img in enumerate(imgs):
xs, ys = split_image(img)
all_xs.append(np.c_[xs, np.repeat(img_i, [xs.shape[0]])])
all_ys.append(ys)
xs = np.array(all_xs).reshape(-1, 3)
xs = (xs - np.mean(xs, 0)) / np.std(xs, 0)
ys = np.array(all_ys).reshape(-1, 3)
ys = ys / 127.5 - 1
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = build_model(xs, ys, n_neurons, n_layers,
activation_fn, final_activation_fn,
cost_type)
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(model['cost'])
sess.run(tf.initialize_all_variables())
gifs = []
costs = []
step_i = 0
for it_i in range(n_iterations):
# Get a random sampling of the dataset
idxs = np.random.permutation(range(len(xs)))
# The number of batches we have to iterate over
n_batches = len(idxs) // batch_size
training_cost = 0
# Now iterate over our stochastic minibatches:
for batch_i in range(n_batches):
# Get just minibatch amount of data
idxs_i = idxs[batch_i * batch_size:
(batch_i + 1) * batch_size]
# And optimize, also returning the cost so we can monitor
# how our optimization is doing.
cost = sess.run(
[model['cost'], optimizer],
feed_dict={model['X']: xs[idxs_i],
model['Y']: ys[idxs_i]})[0]
training_cost += cost
print('iteration {}/{}: cost {}'.format(
it_i + 1, n_iterations, training_cost / n_batches))
# Also, every 20 iterations, we'll draw the prediction of our
# input xs, which should try to recreate our image!
if (it_i + 1) % gif_step == 0:
costs.append(training_cost / n_batches)
ys_pred = model['Y_pred'].eval(
feed_dict={model['X']: xs}, session=sess)
img = ys_pred.reshape(imgs.shape)
gifs.append(img)
return gifs
#celeb_imgs = utils.get_celeb_imgs()
#plt.figure(figsize=(10, 10))
#print (celeb_imgs.shape)
#plt.imshow(utils.montage(celeb_imgs).astype(np.uint8))
# It doesn't have to be 100 images, explore!
#imgs = np.array(celeb_imgs).copy()
dirname = "labdogs"
filenames = [os.path.join(dirname, fname)
for fname in os.listdir(dirname)]
filenames = filenames[:16]
assert(len(filenames) == 16)
myimgs = [plt.imread(fname)[..., :3] for fname in filenames]
myimgs = [utils.imcrop_tosquare(img_i) for img_i in myimgs]
imgs = [resize(img_i, (100, 100)) for img_i in myimgs]
plt.figure(figsize=(10, 10))
#plt.imshow(utils.montage(myimgs).astype(np.uint8))
utils.montage(myimgs)
plt.imshow(myimgs[len(myimgs)-1].astype(np.uint8))
# Use the cropped and resized images; rescale skimage's [0, 1] floats back to 0-255, which train() expects
imgs = np.array(imgs).copy() * 255.0
# Change the parameters of the train function and
# explore changing the dataset
gifs = train(imgs=imgs)
montage_gifs = [np.clip(utils.montage(
(m * 127.5) + 127.5), 0, 255).astype(np.uint8)
for m in gifs]
_ = gif.build_gif(montage_gifs, saveto='multiple.gif')
ipyd.Image(url='multiple.gif?{}'.format(np.random.rand()),
height=500, width=500)
final = gifs[-1]
final_gif = [np.clip(((m * 127.5) + 127.5), 0, 255).astype(np.uint8) for m in final]
gif.build_gif(final_gif, saveto='final.gif')
ipyd.Image(url='final.gif?{}'.format(np.random.rand()),
height=200, width=200)
# Train a network to produce something, storing every few
# iterations in the variable gifs, then export the training
# over time as a gif.
...
gif.build_gif(montage_gifs, saveto='explore.gif')
ipyd.Image(url='explore.gif?{}'.format(np.random.rand()),
height=500, width=500)
utils.build_submission('session-2.zip',
('reference.png',
'single.gif',
'multiple.gif',
'final.gif',
'session-2.ipynb'),
('explore.gif'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Session 2 - Training a Network w/ Tensorflow
Step2: <a name="assignment-synopsis"></a>
Step3: Remember, having a series of linear followed by nonlinear operations is what makes neural networks expressive. By stacking a lot of "linear" + "nonlinear" operations in a series, we can create a deep neural network! Have a look at the output ranges of the above nonlinearities when considering which nonlinearity seems most appropriate. For instance, the relu is always above 0, but does not saturate at any value above 0, meaning it can be anything above 0. That's unlike the sigmoid which does saturate at both 0 and 1, meaning its values for a single output neuron will always be between 0 and 1. Similarly, the tanh saturates at -1 and 1.
Step4: Now multiply the tensor using a new variable, $\textbf{W}$, which has 2 rows and 20 columns, so that when it is left multiplied by $\textbf{X}$, the output of the multiplication is None x 20, giving you 20 output neurons. Recall that the tf.matmul function takes two arguments, the left hand side ($\textbf{X}$) and the right hand side ($\textbf{W}$) of the matrix multiplication.
Step5: And add to this result another new variable, $\textbf{b}$, which has [20] dimensions. These values will be added to every output neuron after the multiplication above. Instead of the tf.random_normal_initializer that you used for creating $\textbf{W}$, now use the tf.constant_initializer. Often for bias, you'll set the constant bias initialization to 0 or 1.
Step6: So far we have done
Step7: Now that we've done all of this work, let's stick it inside a function. I've already done this for you and placed it inside the utils module under the function name linear. We've already imported the utils module so we can call it like so, utils.linear(...). The docstring is copied below, and the code itself. Note that this function is slightly different to the one in the lecture. It does not require you to specify n_input, and the input scope is called name. It also has a few more extras in there including automatically converting a 4-d input tensor to a 2-d tensor so that you can fully connect the layer with a matrix multiply (don't worry about what this means if it doesn't make sense!).
Step9: <a name="part-two---image-painting-network"></a>
Step10: In the lecture, I showed how to aggregate the pixel locations and their colors using a loop over every pixel position. I put that code into a function split_image below. Feel free to experiment with other features for xs or ys.
Step11: Let's use this function to create the inputs (xs) and outputs (ys) to our network as the pixel locations (xs) and their colors (ys)
Step12: Also remember, we should normalize our input values!
Step13: Similarly for the output
Step14: We'll normalize the output using a simpler normalization method, since we know the values range from 0-255
Step15: Scaling the image values like this has the advantage that it is still interpretable as an image, unlike if we have negative values.
Step16: But when we give inputs of (row, col) to our network, it won't know what order they are, because we will randomize them. So it will have to learn what color value should be output for any given (row, col).
Step17: Now create a deep neural network that takes your network input $\textbf{X}$ of 2 neurons, multiplies it by a linear and non-linear transformation which makes its shape [None, 20], meaning it will have 20 output neurons. Then repeat the same process again to give you 20 neurons again, and then again and again until you've done 6 layers of 20 neurons. Then finally one last layer which will output 3 neurons, your predicted output, which I've been denoting mathematically as $\hat{\textbf{Y}}$, for a total of 6 hidden layers, or 8 layers total including the input and output layers. Mathematically, we'll be creating a deep neural network that looks just like the previous fully connected layer we've created, but with a few more connections. So recall the first layer's connection is
Step18: <a name="cost-function"></a>
Step19: This is known as the $l_2$ (pronounced el-two) loss. It doesn't penalize small errors as much as it does large errors. This is easier to see when we compare it with another common loss, the $l_1$ (el-one) loss. It is linear in error, by taking the absolute value of the error. We'll compare the $l_1$ loss with normalized values from $0$ to $1$. So instead of having $0$ to $255$ for our RGB values, we'd have $0$ to $1$, simply by dividing our color values by $255.0$.
Step20: So unlike the $l_2$ loss, the $l_1$ loss is really quickly upset if there is any error at all
Step21: <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
Step22: <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
Step23: We now need an optimizer which will take our cost and a learning_rate, which says how far along the gradient to move. This optimizer calculates all the gradients in our network with respect to the cost variable and updates all of the weights in our network using backpropagation. We'll then create mini-batches of our training data and run the optimizer using a session.
Step24: We'll now train our network! The code below should do this for you if you've setup everything else properly. Please read through this and make sure you understand each step! Note that this can take a VERY LONG time depending on the size of your image (make it < 100 x 100 pixels), the number of neurons per layer (e.g. < 30), the number of layers (e.g. < 8), and number of iterations (< 1000). Welcome to Deep Learning
Step25: Let's now display the GIF we've just created
Step26: <a name="explore"></a>
Step27: <a name="code-1"></a>
Step28: Explore changing the parameters of the train function and your own dataset of images. Note, you do not have to use the dataset from the last assignment! Explore different numbers of images, whatever you prefer.
Step29: Now we'll create a gif out of the training process. Be sure to call this 'multiple.gif' for your homework submission
Step30: And show it in the notebook
Step31: What we're seeing is the training process over time. We feed in our xs, which consist of the pixel values of each of our 100 images, it goes through the neural network, and out come predicted color values for every possible input value. We visualize it above as a gif by seeing how at each iteration the network has predicted the entire space of the inputs. We can visualize just the last iteration as a "latent" space, going from the first image (the top left image in the montage), to the last image, (the bottom right image).
Step32: <a name="part-four---open-exploration-extra-credit"></a>
Step33: <a name="assignment-submission"></a>
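As a tiny numeric aside to the $l_1$/$l_2$ discussion in Steps 19 and 20 above (not part of the original assignment): for errors smaller than 1 the squared penalty is smaller than the absolute one, which is why the $l_1$ loss reacts more strongly to small errors.
import numpy as np
error = np.array([0.01, 0.1, 0.5, 1.0, 2.0])
print(np.abs(error))   # l1 penalty for each error
print(error ** 2)      # l2 penalty for each error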
|
5,425
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from IPython.html import widgets
from IPython.html.widgets import interact
from IPython.display import display
tab1_children = [widgets.ButtonWidget(description="ButtonWidget"),
widgets.CheckboxWidget(description="CheckboxWidget"),
widgets.DropdownWidget(values=[1, 2], description="DropdownWidget"),
widgets.RadioButtonsWidget(values=[1, 2], description="RadioButtonsWidget"),
widgets.SelectWidget(values=[1, 2], description="SelectWidget"),
widgets.TextWidget(description="TextWidget"),
widgets.TextareaWidget(description="TextareaWidget"),
widgets.ToggleButtonWidget(description="ToggleButtonWidget"),
widgets.ToggleButtonsWidget(values=["Value 1", "Value2"], description="ToggleButtonsWidget"),
]
tab2_children = [widgets.BoundedFloatTextWidget(description="BoundedFloatTextWidget"),
widgets.BoundedIntTextWidget(description="BoundedIntTextWidget"),
widgets.FloatSliderWidget(description="FloatSliderWidget"),
widgets.FloatTextWidget(description="FloatTextWidget"),
widgets.IntSliderWidget(description="IntSliderWidget"),
widgets.IntTextWidget(description="IntTextWidget"),
]
tab1 = widgets.ContainerWidget(children=tab1_children)
tab2 = widgets.ContainerWidget(children=tab2_children)
i = widgets.AccordionWidget(children=[tab1, tab2])
i.set_title(0,"Basic Widgets")
i.set_title(1,"Numbers Input")
display(i)
def factorial(x):
print "%s!= %s" % (x,np.math.factorial(x))
def factorial2(x):
if type(x) == int:
if x >= 0:
print np.prod(np.arange(1,x+1))
else:
print "ERROR: Number must be positive"
else:
print "ERROR: Only interger is allowed"
factorial(3)
i = interact(factorial, x=(0,100))
#This function plot x, y and adds a title
def plt_arrays(x, y, title="", color="red", linestyle="dashed", linewidth=2):
fig = plt.figure()
axes = fig.add_subplot(111)
axes.plot(x,y, color=color, linestyle=linestyle, linewidth=linewidth)
axes.set_title(title)
axes.grid()
plt.show()
def f(a, b, c, d, **kwargs):
x=np.linspace(-10, 10, 20)
y = a*(x**3) + b*(x**2) + c*x + d
title="$f(x) = (%s)x^{3} + (%s)x^{2} + (%s)x + (%s)$" % (a,b,c,d)
plt_arrays(x,y, title=title, **kwargs)
#Define Constants
a=0.25
b=2
c=-4
d=0
f(a, b, c, d)
i = interact(f,
a=(-10.,10),
b=(-10.,10),
c=(-10.,10),
d=(-10.,10),
color = ["red", "blue", "green"],
linestyle=["solid", "dashed"],
linewidth=(1,5)
)
i.widget
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build-it Widgets
Step2: Simple Example
Step3: Now we will test it using a code cell
Step4: Using interact function
Step5: Controlling a Chart
Step6: We will define a function that return the following
Step7: Displaying a widget from interact function
|
5,426
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-1', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
5,427
|
<ASSISTANT_TASK:>
Python Code:
from pyturb.gas_models import GasMixture
gas_mix = GasMixture(gas_model='Perfect')
gas_mix.add_gas('O2', mass=0.5)
gas_mix.add_gas('H2', mass=0.5)
gas_mix.mixture_gases
gas_mix2 = GasMixture(gas_model='Perfect')
gas_mix2.add_gas('O2', moles=0.5)
gas_mix2.add_gas('H2', moles=0.5)
gas_mix2.mixture_gases
gas_mix3 = GasMixture(gas_model='Perfect')
gas_mix3.add_gas('O2', mass=0.5)
gas_mix3.add_gas('H2', moles=0.121227)
gas_mix3.mixture_gases
from pyturb.gas_models import PerfectIdealGas
air_perfgas = PerfectIdealGas('Air')
print(air_perfgas.thermo_prop)
pyturb_mix = GasMixture('Perfect')
pyturb_mix.add_gas('O2', 0.209476)
pyturb_mix.add_gas('N2', 0.78084)
pyturb_mix.add_gas('Ar', 0.009365)
pyturb_mix.add_gas('CO2', 0.000319)
pyturb_mix.mixture_gases
print('pyTurb air mixture: Rair={0:6.1f}J/kg/K; cp={1:6.1f} J/kg/K; cv={2:6.1f} J/kg/K; gamma={3:4.1f}'.format(pyturb_mix.Rg, pyturb_mix.cp(), pyturb_mix.cv(), pyturb_mix.gamma()))
print('Perfect air: Rair={0:6.1f}J/kg/K; cp={1:6.1f} J/kg/K; cv={2:6.1f} J/kg/K; gamma={3:4.1f}'.format(air_perfgas.Rg, air_perfgas.cp(), air_perfgas.cv(), air_perfgas.gamma()))
# Objective temperature:
T = 1500 #K
# Gas mixture:
pyturb_mix_sp = GasMixture('Semiperfect')
pyturb_mix_sp.add_gas('O2', 0.209476)
pyturb_mix_sp.add_gas('N2', 0.78084)
pyturb_mix_sp.add_gas('Ar', 0.009365)
pyturb_mix_sp.add_gas('CO2', 0.000319)
print('pyTurb air mixture: Rair={0:6.1f}J/kg/K; cp={1:6.1f} J/kg/K; cv={2:6.1f} J/kg/K; gamma={3:4.1f}'.format(pyturb_mix_sp.Rg, pyturb_mix_sp.cp(T), pyturb_mix_sp.cv(T), pyturb_mix_sp.gamma(T)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To inspect the gas mixture conditions, we can use the Pandas DataFrame contained in gas_mixture
Step2: Note that the gas_mixture dataframe contains the information of the mixture
Step3: One can also define the mixture by specifying some pure substances in moles and others in mass
Step4: Note that gas_mix and gas_mix3 are equivalent.
Step5: And now, applying a mixture of molar quantities (per unit mole)
Step6: Therefore, the mixture is composed of
Step7: Where the gas constant, heat capacity at constant pressure, heat capacity at constant volume and the heat capacity ratio are
Step8: Semiperfect Gas Mixture
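A generic sketch (plain numpy, not pyTurb itself) of how mixture properties like the ones printed above follow from molar fractions: a molar-weighted molecular mass gives Rg, and mass-weighted heat capacities give cp and gamma. The cp values are rough room-temperature textbook figures used only for illustration.
import numpy as np
Ru = 8.314462                                                        # universal gas constant [J/mol/K]
molar_fractions = np.array([0.209476, 0.78084, 0.009365, 0.000319])  # O2, N2, Ar, CO2
molar_mass = np.array([31.999, 28.013, 39.948, 44.010]) * 1e-3       # [kg/mol]
cp_mass = np.array([918.0, 1040.0, 520.3, 844.0])                    # approximate cp [J/kg/K]

Mg = np.sum(molar_fractions * molar_mass)    # mixture molecular mass
Rg = Ru / Mg                                 # mixture gas constant (~287 J/kg/K for air)
mass_fractions = molar_fractions * molar_mass / Mg
cp = np.sum(mass_fractions * cp_mass)        # mass-weighted cp (~1004 J/kg/K)
gamma = cp / (cp - Rg)                       # heat capacity ratio (~1.4)
print(Rg, cp, gamma)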
|
5,428
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import theano
import theano.tensor as tt
import kalman
# True values
T = 500 # Time steps
sigma2_eps0 = 3 # Variance of the observation noise
sigma2_eta0 = 10 # Variance in the update of the mean
# Simulate data
np.random.seed(12345)
eps = np.random.normal(scale=sigma2_eps0**0.5, size=T)
eta = np.random.normal(scale=sigma2_eta0**0.5, size=T)
mu = np.cumsum(eta)
y = mu + eps
# Upon using pymc3, the following theano configuration flag is changed,
# leading to tensors being required to have test values
#theano.config.compute_test_value = 'ignore'
# Tensors for the measurement equation
Z = tt.dmatrix(name='Z')
d = tt.dvector(name='d')
H = tt.dmatrix(name='H')
# Tensors for the transition equation
T = tt.dmatrix(name='T')
c = tt.dvector(name='c')
R = tt.dmatrix(name='R')
Q = tt.dmatrix(name='Q')
# Initial position and uncertainty
a0 = tt.dvector(name='a0')
P0 = tt.dmatrix(name='P0')
ɛ_σ2 = 3.
η_σ2 = 10.
args = dict(Z = np.array([[1.]]),
d = np.array([0.]),
H = np.array([[ɛ_σ2]]),
T = np.array([[1.]]),
c = np.array([0.]),
R = np.array([[1.]]),
Q = np.array([[η_σ2]]),
a0 = np.array([0.]),
P0 = np.array([[1e6]]))
kalmanTheano = kalman.KalmanTheano(Z, d, H, T, c, R, Q, a0, P0)
(at, Pt, lliks), updates = kalmanTheano.filter(y[:,None])
f = theano.function([Z, d, H, T, c, R, Q, a0, P0], lliks)
llik = f(**args)
llik[1:].sum()
print('Measuring time...')
%timeit f(**args)
Y0 = tt.dvector(name='Y0')
_,_,llik = kalman.core._oneStep(Y0, Z, d, H, T, c, R, Q, a0, P0)
profiler = theano.compile.ScanProfileStats()
f = theano.function([Y0, Z, d, H, T, c, R, Q, a0, P0], llik, profile=profiler)
f(y[0,None], **args);
profiler.summary()
profiler = theano.compile.ScanProfileStats()
(_,_,llik),_ = kalmanTheano.filter(y[:,None], profile=profiler)
f = theano.function([Z, d, H, T, c, R, Q, a0, P0], llik, profile=profiler)
f(**args);
# Select the node corresponding to the scan operation
scan_op = next(k for k in profiler.op_nodes()
if isinstance(k, theano.scan_module.scan_op.Scan))
scan_op.profile.summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use the same data as in the 01_RandomWalkPlusObservation notebook.
Step2: Next, we create all the tensors required to describe our model (the assumed state-space form is sketched after this list)
Step3: We will also create some actual values for them
Step4: Let's calculate the likelihood of the observed values, given the parameters above
Step5: Time required for the log-likelihood calculation
Step6: Profiling a non-scan operation is relatively simple. As an example, let's create a function to calculate the first time step of the Kalman filter
Step7: Repeating the procedure with a scan operation, we can see that the code inside the scan is not profiled. It took me a while to get this working (not even Stack Overflow helped!). In the end, this is how I made it work
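For reference, the Z, d, H, T, c, R and Q tensors above presumably encode the standard linear-Gaussian state-space model (this is an assumption based on the Durbin-Koopman-style naming, not a statement about the kalman module's internals):
$$y_t = Z\,\alpha_t + d + \varepsilon_t,\quad \varepsilon_t \sim N(0, H), \qquad \alpha_{t+1} = T\,\alpha_t + c + R\,\eta_t,\quad \eta_t \sim N(0, Q)$$
With the values used above ($Z = T = R = 1$, $d = c = 0$, $H = \sigma^2_\varepsilon$, $Q = \sigma^2_\eta$) this reduces to the random walk plus observation noise that generated the data.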
|
5,429
|
<ASSISTANT_TASK:>
Python Code:
import supp_functions as fce
import xarray as xr
import pandas as pd
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
s_year = 1979
e_year = 2009
vari ='t'
in_dir = '~/'
in_netcdf = in_dir + 'jra55_tmp_1960_2009_zm.nc'
ds = xr.open_dataset(in_netcdf)
times = pd.date_range(str(s_year)+'-01-01', str(e_year)+'-12-31', name='time', freq = 'M')
ds_sel = ds.sel(time = times, method='ffill') #nearest
ds_sel = ds_sel[vari]
climatology = ds_sel.groupby('time.month').mean('time')
anomalies = ds_sel.groupby('time.month') - climatology
global reg
solar = fce.open_reg_ccmi(in_dir+'solar_1947.nc', 'solar', 0, 1947, s_year, e_year)
solar /= 126.6
trend = np.linspace(-1, 1, solar.shape[0])
norm = 4
what_re = 'jra55'
what_sp = ''
i_year2 = 1947
i_year = 1960
what_re2 = 'HadISST'
saod = fce.open_reg_ccmi(in_dir+'sad_gm_50hPa_1949_2013.nc', 'sad', 0, 1949, s_year, e_year)
qbo1 = fce.open_reg_ccmi(in_dir+'qbo_'+what_re+what_sp+'_pc1.nc', 'index', norm, i_year, s_year, e_year)
qbo2 = fce.open_reg_ccmi(in_dir+'qbo_'+what_re+what_sp+'_pc2.nc', 'index', norm, i_year, s_year, e_year)
enso = fce.open_reg_ccmi(in_dir+'enso_'+what_re2+'_monthly_'+str(i_year2)+'_'+str(e_year)+'.nc', \
'enso', norm, i_year2, s_year, e_year)
print(trend.shape, solar.shape, saod.shape, enso.shape, qbo1.shape, qbo2.shape, anomalies.time.shape)
reg = np.column_stack((trend, solar, qbo1, qbo2, saod, enso))
def xr_regression(y):
X = sm.add_constant(reg, prepend=True) # regressor matrix
mod = sm.GLSAR(y.values, X, 2, missing = 'drop') # MLR analysis with AR2 modeling
res = mod.iterative_fit()
return xr.DataArray(res.params[1:])
stacked = anomalies.stack(allpoints = ['lev', 'lat']).squeeze()
stacked = stacked.reset_coords(drop=True)
coefs = stacked.groupby('allpoints').apply(xr_regression)
coefs_unstacked = coefs.unstack('allpoints')
%matplotlib inline
coefs_unstacked.isel(dim_0 = [1]).squeeze().plot.contourf(yincrease=False)#, vmin=-1, vmax=1, cmap=plt.cm.RdBu_r)
coefs_unstacked.isel(dim_0 = [1]).squeeze().plot.contour(yincrease=False, colors='k', add_colorbar=False, \
levels = [-0.5, -0.2,-0.1,0,0.1,0.2, 0.5])
plt.yscale('log')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data opening
Step2: Variable and period of analysis selection
Step3: Deseasonalizing
Step4: Regressor loading
Step5: Regression function
Step6: Regression calculation
Step7: Visualization
|
5,430
|
<ASSISTANT_TASK:>
Python Code:
from numpy.random import standard_normal # Gaussian variables
# The original notebook relies on a pylab-style namespace; the imports below are an
# assumption that makes these cells self-contained (numpy helpers, pyplot, ipywidgets).
from numpy import dot, array, cos, sin, pi, newaxis, zeros, ones, diag, sqrt, multiply, cumsum
import matplotlib.pyplot as plt
from ipywidgets import interact, FloatSlider, IntSlider
from IPython.display import display as disp  # assumed stand-in for the notebook's disp helper
N = 1000; P = 5
X = standard_normal((N, P))
W = X - X.mean(axis=0,keepdims=True)
print(dot(W[:,0], W[:,1]))
from sklearn.decomposition import PCA
S=PCA(whiten=True).fit_transform(X)
print(dot(S[:,0], S[:,1]))
from numpy.random import standard_normal
from matplotlib.patches import Ellipse
from numpy.linalg import svd
@interact
def plot_2d_pca(mu_x=FloatSlider(min=-3.0, max=3.0, value=0),
mu_y=FloatSlider(min=-3.0, max=3.0, value=0),
sigma_x=FloatSlider(min=0.2, max=1.8, value=1.8),
sigma_y=FloatSlider(min=0.2, max=1.8, value=0.3),
theta=FloatSlider(min=0.0, max=pi, value=pi/6), center=False):
mu=array([mu_x, mu_y])
sigma=array([sigma_x, sigma_y])
R=array([[cos(theta),-sin(theta)],[sin(theta),cos(theta)]])
X=dot(standard_normal((1000, 2)) * sigma[newaxis,:],R.T) + mu[newaxis,:]
# Plot the points and the ellipse
fig, ax = plt.subplots(figsize=(8,8))
ax.scatter(X[:200,0], X[:200,1], marker='.')
ax.grid()
M=8.0
ax.set_xlim([-M,M])
ax.set_ylim([-M,M])
e=Ellipse(xy=array([mu_x, mu_y]), width=sigma_x*3, height=sigma_y*3, angle=theta/pi*180,
facecolor=[1.0,0,0], alpha=0.3)
ax.add_artist(e)
# Perform PCA and plot the vectors
if center:
X_mean=X.mean(axis=0,keepdims=True)
else:
X_mean=zeros((1,2))
# Doing PCA here... I'm using svd instead of scikit-learn PCA, we'll come back to this.
U,s,V =svd(X-X_mean, full_matrices=False)
for v in dot(diag(s/sqrt(X.shape[0])),V): # Each eigenvector
ax.arrow(X_mean[0,0],X_mean[0,1],-v[0],-v[1],
head_width=0.5, head_length=0.5, fc='b', ec='b')
Ustd=U.std(axis=0)
ax.set_title('std(U*s) [%f,%f]' % (Ustd[0]*s[0],Ustd[1]*s[1]))
import pickle
dataset=pickle.load(open('data/cafe.pkl','rb')) # or 'pofa.pkl' for POFA; binary mode is required for pickle files
disp('dataset.images shape is %s' % str(dataset.images.shape))
disp('dataset.data shape is %s' % str(dataset.data.shape))
@interact
def plot_face(image_id=(0, dataset.images.shape[0]-1)):
plt.imshow(dataset.images[image_id],cmap='gray')
plt.title('Image Id = %d, Gender = %d' % (dataset.target[image_id], dataset.gender[image_id]))
plt.axis('off')
X=dataset.data.copy() # So that we won't mess up the data in the dataset\
X_mean=X.mean(axis=0,keepdims=True) # Mean for each dimension across sample (centering)
X_std=X.std(axis=0,keepdims=True)
X-=X_mean
disp(all(abs(X.mean(axis=0))<1e-12)) # Are means for all dimensions very close to zero?
from numpy.linalg import svd
U,s,V=svd(X,compute_uv=True, full_matrices=False)
disp(str(U.shape))
disp(str(s.shape))
disp(str(V.shape))
variance_ratio=s**2/(s**2).sum() # Normalized so that they add to one.
@interact
def plot_variance_ratio(n_components=(1, len(variance_ratio))):
n=n_components-1
fig, axs = plt.subplots(1, 2, figsize=(12, 5))
axs[0].plot(variance_ratio)
axs[0].set_title('Explained Variance Ratio')
axs[0].set_xlabel('n_components')
axs[0].axvline(n, color='r', linestyle='--')
axs[0].axhline(variance_ratio[n], color='r', linestyle='--')
axs[1].plot(cumsum(variance_ratio))
axs[1].set_xlabel('n_components')
axs[1].set_title('Cumulative Sum')
captured=cumsum(variance_ratio)[n]
axs[1].axvline(n, color='r', linestyle='--')
axs[1].axhline(captured, color='r', linestyle='--')
axs[1].annotate(s='%f%% with %d components' % (captured * 100, n_components), xy=(n, captured),
xytext=(10, 0.5), arrowprops=dict(arrowstyle="->"))
image_shape=dataset.images.shape[1:] # (H x W)
@interact
def plot_eigenface(eigenface=(0, V.shape[0]-1)):
v=V[eigenface]*X_std
plt.imshow(v.reshape(image_shape), cmap='gray')
plt.title('Eigenface %d (%f to %f)' % (eigenface, v.min(), v.max()))
plt.axis('off')
@interact
def plot_reconstruction(image_id=(0,dataset.images.shape[0]-1), n_components=(0, V.shape[0]-1),
pc1_multiplier=FloatSlider(min=-2,max=2, value=1)):
# This is where we perform the projection and un-projection
Vn=V[:n_components]
M=ones(n_components)
if n_components > 0:
M[0]=pc1_multiplier
X_hat=dot(multiply(dot(X[image_id], Vn.T), M), Vn)
# Un-center
I=X[image_id] + X_mean
I_hat = X_hat + X_mean
D=multiply(I-I_hat,I-I_hat) / multiply(X_std, X_std)
# And plot
fig, axs = plt.subplots(1, 3, figsize=(10, 10))
axs[0].imshow(I.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
axs[0].axis('off')
axs[0].set_title('Original')
axs[1].imshow(I_hat.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
axs[1].axis('off')
axs[1].set_title('Reconstruction')
axs[2].imshow(1-D.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
axs[2].axis('off')
axs[2].set_title('Difference^2 (mean = %f)' % sqrt(D.mean()))
plt.tight_layout()
def plot_morph(left=0, right=1, mix=0.5):
# Projected images
x_lft=dot(X[left], V.T)
x_rgt=dot(X[right], V.T)
# Mix
x_avg = x_lft * (1.0-mix) + x_rgt * (mix)
# Un-project
X_hat = dot(x_avg[newaxis,:], V)
I_hat = X_hat + X_mean
# And plot
fig, axs = plt.subplots(1, 3, figsize=(10, 10))
axs[0].imshow(dataset.images[left], cmap='gray', vmin=0, vmax=1)
axs[0].axis('off')
axs[0].set_title('Left')
axs[1].imshow(I_hat.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
axs[1].axis('off')
axs[1].set_title('Morphed (%.2f %% right)' % (mix * 100))
axs[2].imshow(dataset.images[right], cmap='gray', vmin=0, vmax=1)
axs[2].axis('off')
axs[2].set_title('Right')
plt.tight_layout()
interact(plot_morph,
left=IntSlider(max=dataset.images.shape[0]-1),
right=IntSlider(max=dataset.images.shape[0]-1,value=1),
mix=FloatSlider(value=0.5, min=0, max=1.0))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I'll skip ahead and use a pre-canned PCA routine from scikit-learn (but we'll dig into it a bit later!) Let's see what happens to the transformed variables, ${\bf S}$
Step2: Another way to look at ${\bf V}$ is to think of its rows as projections. Since the row vectors of ${\bf V}$ are orthogonal to each other, the projected data ${\bf S}$ lies in a new "coordinate system" specified by ${\bf V}$. Furthermore, the new coordinate system is sorted in decreasing order of the variance in the original data. So, PCA can be thought of as calculating a new coordinate system whose basis vectors point in the directions of largest variance first.
Step3: Yet another use for ${\bf V}$ is dimensionality reduction. In many scenarios encountered in image manipulation (as we'll see soon), we might want a more concise representation of the data ${\bf X}$. PCA with $K < P$ is one way to reduce the dimensionality
Step4: Preprocessing
Step5: Then we perform SVD to calculate the projection matrix $V$. By default, U,s,V=svd(...) returns full matrices, which will return $n \times n$ matrix U, $n$-dimensional vector of singular values s, and $d \times d$ matrix V. But here, we don't really need $d \times d$ matrix V; with full_matrices=False, svd only returns $n \times d$ matrix for V.
Step6: We can also plot how much each eigenvector in V contributes to the overall variance by plotting variance_ratio = $\frac{s^2}{\sum s^2}$. (Notice that s is already in the decreasing order.) The cumsum (cumulative sum) of variance_ratio then shows how much of the variance is explained by components up to n_components.
Step7: Since we're dealing with face data, each row vector of ${\bf V}$ is called an "eigenface". The first "eigenface" is the one that explains a lot of variances in the data, whereas the last one explains the least.
Step8: Now let's try reconstructing faces with different numbers of principal components (PCs)! The transformed X is reconstructed by un-projecting with V and adding back the sample mean, which is why even with zero components you get a face-like (mean-face) image. A compact sketch of this rank-K projection and reconstruction follows this list.
Step9: Image morphing
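A compact sketch of the rank-$K$ projection and reconstruction mentioned in Step3 and Step8, reusing the X, V and X_mean computed above; the value of K and the variable names introduced here are arbitrary.
# Keep only the first K principal directions and reconstruct the centred data.
K = 50
V_K = V[:K]                          # each row of V is a principal direction
print(dot(V_K[0], V_K[1]))           # ~0: the directions are orthogonal
X_proj = dot(X, V_K.T)               # scores: one K-dimensional vector per face
X_recon = dot(X_proj, V_K) + X_mean  # back to pixel space, with the mean added back
print(X_recon.shape)                 # same shape as the original data matrix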
|
5,431
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
import pyensae.datasource
pyensae.datasource.download_data("matrix_distance_7398.zip", website = "xd")
import pandas
df = pandas.read_csv("matrix_distance_7398.txt", sep="\t", header=None, names=["v1","v2","distance"])
df.head()
matrice = df.values
matrice[:5]
with open ("matrix_distance_7398.txt", "r") as f :
matrice = [ row.strip(' \n').split('\t') for row in f.readlines() ]
for row in matrice:
row[2] = float(row[2])
print(matrice[:5])
import random
skieurs = [ random.gauss(1.75, 0.1) for i in range(0,10) ]
paires = [ random.gauss(1.75, 0.1) for i in range(0,15) ]
skieurs.sort()
paires.sort()
print(skieurs)
print(paires)
import pyensae.datasource
files = pyensae.datasource.download_data("facebook.tar.gz",website="http://snap.stanford.edu/data/")
fe = [ f for f in files if "edge" in f ]
fe
import pandas
df = pandas.read_csv("facebook/1912.edges", sep=" ", names=["v1","v2"])
print(df.shape)
df.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dynamic programming is a way of solving, in a similar manner, a whole class of optimisation problems that share the same property. We assume that problem $P$ can be split into several parts $P_1$, $P_2$, ... If $S$ is the optimal solution of problem $P$, then each part $S_1$, $S_2$, ... of that solution, applied to the sub-problems, is also optimal (a toy sketch of this idea follows this list).
Step2: This file can be read either with the pandas module introduced in session 10 (TD 10)
Step3: The values member behaves like a matrix, that is, a list of lists
Step4: One can also use the small example that was presented in session 4 on files (TD 4)
Step5: Each line defines a trip between two cities made in a single leg, with no stopover. Accents have been removed from the file.
Step6: Exercise 7
Step7: This file has to be decompressed with 7zip if you are using pyensae < 0.8. On Linux (and Mac), you will need to use a command described here: tar.
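A toy sketch of the optimal-substructure idea from Step1 (purely illustrative, not a solution to the exercise; the graph, city names and distances are made up):
# The best A->D route is built from the best sub-routes, so sub-results can be
# cached and reused - the essence of dynamic programming.
toy_dist = {("A", "B"): 1.0, ("B", "C"): 2.0, ("A", "C"): 4.0, ("B", "D"): 2.5, ("C", "D"): 1.0}
def best_path(a, b, memo=None):
    if memo is None:
        memo = {}
    if (a, b) in memo:
        return memo[(a, b)]
    best = toy_dist.get((a, b), float("inf"))   # direct trip, if it exists
    for (x, y), d in toy_dist.items():          # otherwise go through an intermediate city
        if x == a and y != b:
            best = min(best, d + best_path(y, b, memo))
    memo[(a, b)] = best
    return best
print(best_path("A", "D"))  # 3.5, via B (this works because the toy graph has no cycles)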
|
5,432
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
import os
filename = 'World_population_estimates.html'
if not os.path.exists(filename):
!wget https://raw.githubusercontent.com/AllenDowney/ModSimPy/master/data/World_population_estimates.html
from pandas import read_html
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
census = table2.census / 1e9
from modsim import TimeSeries
def run_simulation(system, growth_func):
    """Simulate the system using any update function.
    system: System object
    growth_func: function that computes the population next year
    returns: TimeSeries
    """
results = TimeSeries()
results[system.t_0] = system.p_0
for t in range(system.t_0, system.t_end):
growth = growth_func(results[t], t, system)
results[t+1] = results[t] + growth
return results
from modsim import decorate
def plot_estimates():
census.plot(style=':', label='US Census')
un.plot(style='--', label='UN DESA')
decorate(xlabel='Year',
ylabel='World population (billion)')
def growth_func_quad(pop, t, system):
return system.alpha * pop + system.beta * pop**2
from modsim import System
t_0 = census.index[0]
p_0 = census[t_0]
t_end = census.index[-1]
system = System(t_0=t_0,
p_0=p_0,
t_end=t_end)
system.alpha = 25 / 1000
system.beta = -1.8 / 1000
results = run_simulation(system, growth_func_quad)
results.plot(color='gray', label='model')
plot_estimates()
decorate(title='Quadratic Growth Model')
from numpy import linspace
pop_array = linspace(0, 15, 101)
growth_array = (system.alpha * pop_array +
system.beta * pop_array**2)
from matplotlib.pyplot import plot
plot(pop_array, growth_array, label='growth')
decorate(xlabel='Population (billions)',
ylabel='Net growth (billions)',
title='Growth vs. Population')
-system.alpha / system.beta
def carrying_capacity(system):
K = -system.alpha / system.beta
return K
sys1 = System(alpha=0.025, beta=-0.0018)
pop = carrying_capacity(sys1)
print(pop)
def carrying_capacity():
K = -sys1.alpha / sys1.beta
return K
sys1 = System(alpha=0.025, beta=-0.0018)
pop = carrying_capacity()
print(pop)
# WRONG
def carrying_capacity(system):
system = System(alpha=0.025, beta=-0.0018)
K = -system.alpha / system.beta
return K
sys1 = System(alpha=0.03, beta=-0.002)
pop = carrying_capacity(sys1)
print(pop)
# WRONG
def carrying_capacity(system):
K = -system.alpha / system.beta
sys1 = System(alpha=0.025, beta=-0.0018)
pop = carrying_capacity(sys1)
print(pop)
# Solution
system.r = system.alpha
system.K = -system.alpha/system.beta
system.r, system.K
# Solution
def growth_func_quad2(pop, t, system):
return system.r * pop * (1 - pop / system.K)
# Solution
results2 = run_simulation(system, growth_func_quad2)
results2.plot(color='gray', label='model')
plot_estimates()
decorate(title='Quadratic Growth Model, alternate parameters')
# Solution
p0_array = linspace(1, 25, 11)
for p_0 in p0_array:
system.p_0 = p_0
results3 = run_simulation(system, growth_func_quad)
results3.plot(label='_nolegend')
decorate(xlabel='Year',
ylabel='Population (billions)',
title='Projections with hypothetical starting populations')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the previous chapter we developed a population model where net growth during each time step is proportional to the current population. This model seems more realistic than the constant growth model, but it does not fit the data as well.
Step3: And here are the functions from the previous chapter.
Step4: Quadratic growth
Step5: Here's the System object we'll use, initialized with t_0, p_0, and t_end.
Step6: Now we have to add the parameters alpha and beta .
Step7: And here's how we run it
Step8: And here are the results.
Step9: The model fits the data well over the whole range, with just a bit of space between them in the 1960s.
Step10: Now I'll use the quadratic model to compute net growth for each population.
Step11: To plot the growth rate versus population, we can import the plot function from Matplotlib
Step12: And use it like this.
Step13: Note that the x-axis is not time, as in the previous figures, but population. We can divide this curve into four regimes of behavior
Step14: With these parameters, net growth is 0 when the population is about 13.9 billion (a short derivation is given after this list).
Step15: Now let's see all the ways that can go wrong.
Step16: This version actually works, but it is not as versatile as it could be.
Step17: In this example, we have a System object named sys1 that gets passed as an argument to carrying_capacity.
Step18: A function that doesn't have a return statement actually returns a special value called None, so in this example the value of pop is None. If you are debugging a program and find that the value of a variable is None when it shouldn't be, a function without a return statement is a likely cause.
Step19: Exercise
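A short derivation behind Step14 (plain algebra, nothing ModSim-specific): setting the net growth to zero,
$$\Delta p = \alpha p + \beta p^2 = p\,(\alpha + \beta p) = 0 \quad\Longrightarrow\quad p = 0 \;\text{ or }\; p = -\frac{\alpha}{\beta}.$$
With $\alpha = 0.025$ and $\beta = -0.0018$, $-\alpha/\beta \approx 13.9$ billion; this is the carrying capacity $K$ that appears in the logistic form $\Delta p = r\,p\,(1 - p/K)$ with $r = \alpha$.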
|
5,433
|
<ASSISTANT_TASK:>
Python Code:
import nilmtk
import matplotlib.pyplot as plt
dataset = nilmtk.DataSet('/data/mine/vadeec/merged/ukdale.h5')
dataset.set_window("2014-06-01", "2014-07-01")
BUILDING = 1
elec = dataset.buildings[BUILDING].elec
fridge = elec['fridge']
activations = fridge.get_activations()
print("Number of activations =", len(activations))
activations[1].plot()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, to speed up processing, we'll set a "window of interest" so NILMTK will only consider one month of data.
Step2: Get the ElecMeter associated with the Fridge in House 1
Step3: Now load the activations
|
5,434
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import autokeras as ak
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[:100]  # keep only 100 samples so the demo runs quickly
y_train = y_train[:100]
print(x_train.shape)  # (100, 28, 28)
print(y_train.shape)  # (100,)
print(y_train[:3])  # the first three training labels
# Initialize the image regressor.
reg = ak.ImageRegressor(overwrite=True, max_trials=1)
# Feed the image regressor with training data.
reg.fit(x_train, y_train, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(x_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))
reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=2,
)
split = 80  # the full tutorial uses 50000; with the 100-sample demo above, 80/20 keeps a non-empty validation set
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=2,
)
input_node = ak.ImageInput()
output_node = ak.ImageBlock(
# Only search ResNet architectures.
block_type="resnet",
# Normalize the dataset.
normalize=False,
# Do not do data augmentation.
augment=False,
)(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)
input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node)
output_node = ak.ResNetBlock(version="v2")(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape the images to have the channel dimension.
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
y_train = y_train.reshape(y_train.shape + (1,))
y_test = y_test.reshape(y_test.shape + (1,))
print(x_train.shape) # (60000, 28, 28, 1)
print(y_train.shape) # (60000, 1)
train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,)))
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,)))
reg = ak.ImageRegressor(overwrite=True, max_trials=1)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The second step is to run the ImageRegressor. It is recommended to have more trials for more complicated datasets; this is just a quick demo, so max_trials is set to 1.
Step2: Validation Data
Step3: You can also use your own validation set instead of splitting it from the training data, by passing it with validation_data.
Step4: Customized Search Space
Step5: The usage of AutoModel is similar to the functional API of Keras. Basically, you build a graph whose edges are blocks and whose nodes are the intermediate outputs of blocks.
Step6: Data Format
|
5,435
|
<ASSISTANT_TASK:>
Python Code:
import os
PATH="/Users/david/Desktop/CourseWork/TheArtOfDataScience/claritycontrol/code/scripts/" # use your own path
os.chdir(PATH)
import clarity as cl # I wrote this module for easier operations on data
import clarity.resources as rs
import csv,gc # garbage memory collection :)
import numpy as np
import matplotlib.pyplot as plt
import jgraph as ig
%matplotlib inline
# settings for histogram
BINS=32 # histogram bins
RANGE=(10.0,300.0)
for token in rs.TOKENS:
c = cl.Clarity(token)
fname = rs.HIST_DATA_PATH+token+".csv"
hist, bin_edges = c.loadImg().getHistogram(bins=BINS,range=RANGE,density=False)
np.savetxt(fname,hist,delimiter=',')
print fname,"saved."
del c
gc.collect()
import numpy as np
import clarity.resources as rs
features = np.empty(shape=(1,BINS))
for token in rs.TOKENS:
fname = rs.HIST_DATA_PATH+token+".csv"
data = np.loadtxt(fname,delimiter=',')
features = np.vstack([features,data])
features = features[1:,]
minc = np.min(features)
maxc = np.max(features)
features = (features-minc)/(maxc-minc)
print features
np.savetxt(rs.HIST_DATA_PATH+"features.csv",features,delimiter=',')
from sklearn import cross_validation
from sklearn.cross_validation import LeaveOneOut
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
%matplotlib inline
np.random.seed(12345678) # for reproducibility, set random seed
# Cocaine = ["Cocaine174","Cocaine175","Cocaine178"]
# Control = ["Control181","Control182","Control189","Control239","Control258"]
# Fear = ["Fear187","Fear197","Fear199","Fear200"]
features = np.loadtxt(rs.HIST_DATA_PATH+"features.csv",delimiter=',')
temp_mu = np.mean(features,axis=1)
temp_std = np.std(features,axis=1)
mu = [np.mean(temp_mu[0:3]),np.mean(temp_mu[3:8]),np.mean(temp_mu[8:12])]
std = [np.mean(temp_std[0:3]),np.mean(temp_std[3:8]),np.mean(temp_std[8:12])]
print mu
print std
std=[1,1,1]
# define number of subjects per class
S = np.array((9, 21, 30, 39, 45, 63, 81, 96, 108, 210, 333))
names = ["Nearest Neighbors", "Linear SVM", "Random Forest",
"Linear Discriminant Analysis", "Quadratic Discriminant Analysis"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
LinearDiscriminantAnalysis()]
# QuadraticDiscriminantAnalysis()]
accuracy = np.zeros((len(S), len(classifiers), 2), dtype=np.dtype('float64'))
for idx1, s in enumerate(S):
s0=s/3
s1=s/3
s2=s/3
x0 = np.random.normal(mu[0],std[0],(s0,BINS))
x1 = np.random.normal(mu[1],std[1],(s1,BINS))
x2 = np.random.normal(mu[2],std[2],(s2,BINS))
X = x0
X = np.vstack([X,x1])
X = np.vstack([X,x2])
y = np.append(np.append(np.zeros(s0), np.ones(s1)),np.ones(s2)*2)
for idx2, cla in enumerate(classifiers):
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=0)
clf = cla.fit(X_train, y_train)
loo = LeaveOneOut(len(X))
scores = cross_validation.cross_val_score(clf, X, y, cv=loo)
accuracy[idx1, idx2,] = [scores.mean(), scores.std()]
print("Accuracy of %s: %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
print accuracy
plt.errorbar(S, accuracy[:,0,0], yerr = accuracy[:,0,1], hold=True, label=names[0])
plt.errorbar(S, accuracy[:,1,0], yerr = accuracy[:,1,1], color='green', hold=True, label=names[1])
plt.errorbar(S, accuracy[:,2,0], yerr = accuracy[:,2,1], color='red', hold=True, label=names[2])
plt.errorbar(S, accuracy[:,3,0], yerr = accuracy[:,3,1], color='black', hold=True, label=names[3])
# plt.errorbar(S, accuracy[:,4,0], yerr = accuracy[:,4,1], color='brown', hold=True, label=names[4])
plt.xscale('log')
plt.xlabel('number of samples')
plt.ylabel('accuracy')
plt.title('Accuracy of classification under simulated data')
plt.axhline(1, color='red', linestyle='--')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
y=np.array([0,0,0,1,1,1,1,1,2,2,2,2])
features = np.loadtxt(rs.HIST_DATA_PATH+"features.csv",delimiter=',')
accuracy=np.zeros((len(classifiers),2))
for idx, cla in enumerate(classifiers):
X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, y, test_size=0.4, random_state=0)
clf = cla.fit(X_train, y_train)
loo = LeaveOneOut(len(features))
scores = cross_validation.cross_val_score(clf, features, y, cv=loo)
accuracy[idx,] = [scores.mean(), scores.std()]
print("Accuracy of %s: %0.2f (+/- %0.2f)" % (names[idx], scores.mean(), scores.std() * 2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: Histogram data preparation
Step2: Scale data
Step3: Setup Step
Step4: Steps 4 & 5
Step5: Step 6
Step6: Step 7
|
5,436
|
<ASSISTANT_TASK:>
Python Code:
from pypot.creatures import PoppyErgoJr
poppy = PoppyErgoJr(use_http=True, use_snap=True)
# If you want to use another robot (humanoid, torso, ...) adapt this code
#from pypot.creatures import PoppyTorso
#poppy = PoppyTorso(use_http=True, use_snap=True)
# If you want to use the robot with the camera unpluged,
# you have to pass the argument camera='dummy
#poppy = PoppyErgoJr(camera='dummy', use_http=True, use_snap=True)
# If you want to use a simulated robot in the 3D web viewer aka "poppy simu"
# you have to pass the argument simulator='poppy-simu'
#poppy = PoppyErgoJr(simulator='poppy-simu', use_http=True, use_snap=True)
import requests
from IPython.core.display import HTML
#Testing Snap API access
valid_url_for_snap_server=''
try:
response = requests.get('http://poppy.local:6969/')
if response.status_code==200:
valid_url_for_snap_server=response.text
except:
print('http://poppy.local:6969/ is unreachable')
HTML(valid_url_for_snap_server)
def to_api(url, hostname='poppy.local', port='6969'):
url_root='http://{}:{}/'.format(hostname, port)
print('> call:',url_root+url)
try:
response = requests.get(url_root+url)
if response.status_code==200:
return response.text
else:
return 'ERROR'
except:
print('{} is unreachable'.format(url_root))
def get_ip():
return to_api('ip/')
def get_all_positions():
return [float(val) for val in to_api('motors/get/positions').split(';')]
print(get_ip())
print(get_all_positions())
def get_motors_alias():
return to_api('motors/alias').split('/')
def get_motors_name(alias='motors'):
return to_api('motors/'+alias).split('/')
print(get_motors_alias())
print(get_motors_name())
print('these motors: {}, are in group of motors named: {}.'.format(
get_motors_name(get_motors_alias()[0]),
get_motors_alias()[0])
)
def get_register(motor_id, register):
url='motor/m{}/get/{}'.format(motor_id, register)
return to_api(url)
def get_register_list(motor_id=1):
out=get_register(motor_id, 'registers')
if 'ERROR' in out: return out
else: return eval(out) #type == list
def get_position(motor_id):
out=get_register(motor_id, 'present_position')
if 'ERROR' in out: return out
else: return float(out)
def get_compliant(motor_id):
out=get_register(motor_id, 'compliant')
if 'ERROR' in out: return out
else: return bool(out)
def get_color(motor_id):
return get_register(motor_id, 'led') #type == str
print('all avalible register are: {}'.format(', '.join(get_register_list())))
print('m1 is in position {}°'.format(get_register(1, 'present_position')))
print('m1 is in position {}°'.format(get_position(1)))
print('m1 compliant register is {}'.format(get_register(1, 'compliant')))
print('m1 compliant register is {}'.format(get_compliant(1)))
print('led of m1 is {}'.format(get_register(1, 'led')))
print('led of m1 is {}'.format(get_color(1)))
#print('motor sensitivity {}'.format([get_position(2)==get_position(2) for _ in range(10)]))
def get_registers(motors_id, register):
if type(motors_id)!=list: return 'Type ERROR'
targets=[]
for motor_id in motors_id:
targets.append('m'+str(motor_id))
url='motors/{}/get/{}'.format(';'.join(targets), register)
return to_api(url).split(';')
def get_positions(motors_id):
out=get_registers(motors_id, 'present_position')
if 'ERROR' in out: return out
else: return [float(val) for val in out]
def get_compliants(motors_id):
out=get_registers(motors_id, 'compliant')
if 'ERROR' in out: return out
else: return [bool(val) for val in out]
def get_colors(motors_id):
out=get_registers(motors_id, 'led')
if 'ERROR' in out: return out
else: return [str(val) for val in out]
print('m1 and m2 are respectively in position {}'.format(get_registers([1,2], 'present_position')))
print('m1 and m2 are respectively in position {}'.format(get_positions([1,2])))
print('m1 and m2 compliant register are respectively {}'.format(get_registers([1,2], 'compliant')))
print('m1 and m2 compliant register are respectively {}'.format(get_compliants([1,2])))
print('led of m1 and m2 are respectively {}'.format(get_registers([1,2], 'led')))
print('led of m1 and m2 are respectively {}'.format(get_colors([1,2])))
def set_register(motor_id, register, value):
url='motor/m{}/set/{}/{} '.format(motor_id, register, value)
return to_api(url)
def set_position(motor_id, position):
return set_register(motor_id, 'goal_position', position)
def set_compliant(motor_id, state):
return set_register(motor_id, 'compliant', state)
def set_color(motor_id, color):
return set_register(motor_id, 'led', color)
#note: the motor must be in the non-compliant state to be control it in position
print('set m1 compliant state to false: {}'.format(set_compliant(1,0)))
print('set m1 position to 15°: {}'.format(set_position(1,15)))
print('set m1 compliant state to true: {}'.format(set_compliant(1,1)))
def valid_registers_input(motors_id, registers, values):
if type(motors_id)!=list or type(registers)!=list or type(values)!=list:
return 'Type ERROR'
if len(motors_id) != len(registers) or len(motors_id) != len(values):
return 'Size ERROR'
return motors_id, registers, values
def set_registers(motors_id, registers, values):
registers_input = valid_registers_input(motors_id, registers, values)
if 'ERROR' in registers_input:
return registers_input
else:
motors_id, registers, values = registers_input
cmd=[]
for i, motor_id in enumerate(motors_id):
cmd.append('m{}:{}:{}'.format(motor_id, registers[i], values[i]))
cmd=';'.join(cmd)
url='motors/set/registers/'+cmd
return to_api(url)
def set_positions(motors_id, positions):
return set_registers(motors_id, ['goal_position']*len(motors_id), positions)
def set_compliants(motors_id, states):
return set_registers(motors_id, ['compliant']*len(motors_id), states)
def set_colors(motors_id, colors):
return set_registers(motors_id, ['led']*len(motors_id), colors)
print(set_compliants([1,2],[1,1]))
print(set_registers([1,1,2,3],['led', 'goal_position', 'goal_position', 'led'],['yellow', 45, 25, 'blue']))
print(set_positions([1],[0]))
print(set_compliants([1,2],[0,0]))
print(set_colors([1,2,3],['green']*3))
'''
prepare input for set_register function:
accept:
python list of values,
str list of values (split by space),
int, float, bool
return: python list of str values
'''
def set_type(value):
if type(value)==str:
value=value.split(' ')
elif type(value) in (float, int, bool):
value=[str(value)]
elif type(value)!=list:
return 'Type ERROR'
else:
for i, v in enumerate(value): value[i]=str(v)
return value
'''
re-write valid_registers_input function
valid_registers_input is use by set_registers function
add set_type function
add check size, accept one value for default for each motor
return couple of tree values, each is a list of str values
'''
number_of_all_motors=len(get_motors_name())
all_valid_register=get_register_list()
def valid_registers_input(motors_id, registers, values):
motors_id, registers, values = set_type(motors_id), set_type(registers), set_type(values)
if 'ERROR' in (motors_id or registers or values):
return 'Type ERROR'
if len(registers) == 1:
registers=registers*len(motors_id)
elif len(motors_id) != len(registers):
return 'Size ERROR'
if len(values) == 1:
values=values*len(motors_id)
elif len(motors_id) != len(values):
return 'Size ERROR'
number_of_motors=number_of_all_motors
valid_register=all_valid_register
#assume that value of values variable are check before
for i, motor_id in enumerate(motors_id):
if int(motor_id) <1 or int(motor_id) > number_of_motors or registers[i] not in valid_register:
return 'Value ERROR'
return motors_id, registers, values
'''
No need to re-write set_registers function
but get_registers function need to:
add set_type function to avoid error
add check values
'''
def get_registers(motors_id, register):
motors_id=set_type(motors_id)
if 'ERROR' in motors_id: return motors_id
valid_register=all_valid_register
if register not in valid_register: return 'Value ERROR'
number_of_motors=number_of_all_motors
targets=[]
for i, motor_id in enumerate(motors_id):
if int(motor_id) <1 or int(motor_id) > number_of_motors:
return 'Value ERROR'
else:
targets.append('m'+motor_id)
url='motors/{}/get/{}'.format(';'.join(targets), register)
return to_api(url).split(';')
'''
re-write function
add check value
'''
def set_positions(motors_id, positions):
positions=set_type(positions)
if 'ERROR' in positions: return positions
for position in positions:
if float(position) < -90 or float(position) > 90:
return 'Value ERROR'
return set_registers(motors_id, 'goal_position', positions)
def set_compliants(motors_id, states):
states=set_type(states)
if 'ERROR' in states: return states
for state in states:
if state == 'True': state='1'
elif state == 'False': state='0'
elif state not in ('0', '1'): return 'Value ERROR'
return set_registers(motors_id, 'compliant', states)
def set_colors(motors_id, colors):
colors=set_type(colors)
if 'ERROR' in colors: return colors
for color in colors:
if color not in ['red','green','pink','blue','yellow','off']:
return 'Value ERROR'
return set_registers(motors_id, 'led', colors)
#before syntaxe, work always + check values
print(set_compliants([1,2],[0,0]))
print(set_registers([1,1,2,3],['led', 'goal_position', 'goal_position', 'led'],['yellow', 45, 25, 'blue']))
print(set_positions([1],[0]))
print(set_compliants([1,2],[1,1]))
print(set_colors([1,2,3],['green']*3))
# + more flxible syntaxe
print(set_compliants('1 2',0))
print(set_registers('1 1 2 3','led goal_position goal_position led','yellow 45 25 blue'))
print(set_positions(1,0))
print(set_compliants([1,2],1))
print(set_colors('1 2 3','green'))
#use your function
import time
for i in range(1,7):
print(set_colors(i,'pink'))
print(get_colors(i))
time.sleep(0.5)
print(set_colors(i,'off'))
print(get_colors(i))
#time.sleep(0.5)
for _ in range(2):
for c in ['red','green','pink','blue','yellow']:
print(set_colors('1 2 3 4 5 6', c))
print(get_colors('1 2 3 4 5 6'))
time.sleep(0.5)
print(set_colors('1 2 3 4 5 6','off'))
print(get_colors('1 2 3 4 5 6'))
time.sleep(0.5)
set_compliants('1 2 3 4 5 6', 0)
set_positions('1 2 3 4 5 6', '0 10 -15 10 0 0')
time.sleep(1.5)
print('motors in position: ', get_positions([1, 2, 3, 4, 5, 6]))
set_positions('1 2 3 4 5 6', -10)
time.sleep(.5)
print('motors in position: ', get_positions('1 2 3 4 5 6'))
set_compliants('1 2 3 4 5 6', 1)
time.sleep(.5)
for i in range(1,7): print('m{} in position {}°'.format(i, get_positions(i)))
number_of_all_motors=len(get_motors_name())
def valid_goto_input(motors_id, positions, durations):
motors_id, positions, durations = set_type(motors_id), set_type(positions), set_type(durations)
if 'ERROR' in (motors_id or positions or durations):
return 'Type ERROR'
if len(positions) == 1:
positions=positions*len(motors_id)
elif len(motors_id) != len(positions):
return 'Size ERROR'
if len(durations) == 1:
durations=durations*len(motors_id)
    elif len(motors_id) != len(durations):
return 'Size ERROR'
number_of_motors=number_of_all_motors
for i, motor_id in enumerate(motors_id):
if int(motor_id) <1 or int(motor_id) > number_of_motors:
return 'Value ERROR'
if float(positions[i]) < -90 or float(positions[i]) > 90:
return 'Value ERROR'
if float(durations[i]) < 0:
return 'Value ERROR'
return motors_id, positions, durations
def set_goto(motors_id, positions, durations):
goto_input = valid_goto_input(motors_id, positions, durations)
if 'ERROR' in goto_input:
return goto_input
else:
motors_id, positions, durations = goto_input
cmd=[]
for i, motor_id in enumerate(motors_id):
cmd.append('m{}:{}:{}'.format(motor_id, positions[i], durations[i]))
cmd=';'.join(cmd)
url='motors/set/goto/'+cmd
return to_api(url)
print(set_compliants('1 2 3 4 5 6', False))
print(set_goto('1 2 3 4 5 6', 10, 1))
time.sleep(1.5)
print(set_goto('1 2 3 4 5 6', -10, 2))
time.sleep(2.5)
print(set_compliants('1 2 3 4 5 6', True))
def get_api(url, hostname='poppy.local', port='8080'):
url_root='http://{}:{}/'.format(hostname, port)
print('> get:',url_root+url)
try:
response = requests.get(url_root+url)
if response.status_code==200:
return response
else:
return 'ERROR {}!'.format(response.status_code)
except:
print('{} is unreachable'.format(url_root))
def post_api(url, value, hostname='poppy.local', port='8080'):
url_root='http://{}:{}/'.format(hostname, port)
print('> post: url=', url_root+url, ' ; value=', value)
try:
response = requests.post(url_root+url, json=value)
if response.status_code==200:
return 'Done!'
else:
return 'ERROR {}!'.format(response.status_code)
except:
print('{} is unreachable'.format(url_root))
HTML(get_api('').text)
def get_motor_list(alias='motors'):
url='motor/{}/list.json'.format(alias)
return get_api(url).json()
def get_motor_register_list(motor_id=1):
url = 'motor/m{}/register/list.json'.format(motor_id)
return get_api(url).json()
def get_motor_register_value(motor_id, register):
url = 'motor/m{}/register/{}'.format(motor_id, register)
return get_api(url).json()[register]
print(get_motor_list())
print(get_motor_register_list())
print(get_motor_register_value(1, 'name'))
print(get_motor_register_value(1, 'present_position'))
print(get_motor_register_value(1, 'compliant'))
print(get_motor_register_value(1, 'angle_limit'))
print(get_motor_register_value(1, 'led'))
def get_sensor_list():
return get_api('sensor/list.json').json()
def get_sensor_register_list(sensor):
url = 'sensor/{}/register/list.json'.format(sensor)
return get_api(url).json()
def get_sensor_register_value(sensor, register):
url = 'sensor/{}/register/{}'.format(sensor, register)
return get_api(url).json()[register]
print(get_sensor_list())
print(get_sensor_register_list('camera'))
print(get_sensor_register_value('camera', 'fps'))
print(get_sensor_register_value('camera', 'resolution'))
print(get_sensor_register_value('camera', 'frame'))
def get_primitive_list():
return get_api('primitive/list.json').json()
def get_primitive_property(primitive_name):
url='primitive/{}/property/list.json'.format(primitive_name)
return get_api(url).json()
def get_primitive_method(primitive_name):
url='primitive/{}/method/list.json'.format(primitive_name)
return get_api(url).json()
print(get_primitive_list())
print(get_primitive_property(get_primitive_list()['primitives'][0]))
print(get_primitive_method(get_primitive_list()['primitives'][0]))
def post_motor_value(motor, register, value):
url = 'motor/m{}/register/{}/value.json'.format(motor, register)
return post_api(url, value)
import time
print(post_motor_value(1, 'compliant', False))
print(get_motor_register_value(1, 'compliant'))
print(post_motor_value(1, 'goal_speed', 25))
print(post_motor_value(1, 'goal_position', 25))
for _ in range(10):
print(get_motor_register_value(1, 'present_position'))
time.sleep(0.1)
print(post_motor_value(1, 'goal_position', 0))
for _ in range(10):
print(get_motor_register_value(1, 'present_position'))
time.sleep(0.1)
print(post_motor_value(1, 'compliant', True))
def post_sensor_value(sensor, register, value):
url = 'sensor/{}/register/{}/value.json'.format(sensor, register)
return post_api(url, value)
print(get_sensor_list())
print(get_sensor_register_list('camera'))
print(get_sensor_register_value('camera', 'fps'))
print(post_sensor_value('camera', 'fps', 15.0))
print(get_sensor_register_value('camera', 'fps'))
def post_primitive_property(primitive, prop, value):
url = 'primitive/{}/property/{}/value.json'.format(primitive, prop)
return post_api(url, value)
def post_primitive_method(primitive, meth, value):
url = 'primitive/{}/method/{}/args.json'.format(primitive, meth)
return post_api(url, value)
print(get_primitive_list())
print(get_primitive_property('rest_posture'))
print(get_primitive_method('rest_posture'))
print(post_primitive_method('rest_posture', 'start', 'start'))
time.sleep(2)
print(post_primitive_method('rest_posture', 'stop', 'stop'))
def set_primitive_action(primitive, action):
if action not in ['start','stop','pause','resume']:
return 'Value ERROR'
url = 'primitive/{}/{}.json'.format(primitive, action)
if get_api(url).status_code==200:
return '{} of {} Done!'.format(action, primitive)
else:
return '{} of {} Fail! Error {}'.format(action, primitive, get_api(url).status_code)
print(get_primitive_list())
print(set_primitive_action('rest_posture', 'start'))
time.sleep(2)
print(set_primitive_action('rest_posture', 'stop'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Second
Step2: 2.a. Access to API to get values
Step3: 2.b. Get value - with single input -
Step4: http
Step5: 2.b Get value - with multiple inputs -
Step6: 2.c. Set value - with single input -
Step7: 2.c. Set value - with multiple inputs -
Step8: 2.d. Add checking inputs and use your function
Step9: Another URL
Step10: Recap
Step11: 2*.b. Get request
Step12: 2*.c. Post request
|
5,437
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
import math
from scipy import stats
%matplotlib inline
data = pd.read_csv('Default.csv')
data = data.drop('Unnamed: 0',axis = 1)
#change Yes, No to 1, 0.
data['def_chg'] = data.default.factorize()[0]
data.head()
#linear regression
regr = linear_model.LinearRegression()
regr.fit(data.balance.reshape(-1,1),data.def_chg)
#logistic regression
log_org = linear_model.LogisticRegression(solver='newton-cg')
log_org.fit(data.balance.reshape(-1,1),data.def_chg)
x_array = np.arange(data.balance.min(),data.balance.max())
plt.figure(figsize=(10,5))
#left
plt.subplot(121)
plt.scatter(data.balance,data.def_chg)
plt.plot(data.balance,regr.predict(data.balance.reshape(-1,1)),color = 'g')
plt.plot([-100,3000],[0,0],linestyle = 'dashed', color = 'k')
plt.plot([-100,3000],[1,1],linestyle = 'dashed', color = 'k')
plt.xlim([-140,3000])
plt.xlabel('Balance')
plt.ylabel('Probability of default')
#right
plt.subplot(122)
plt.scatter(data.balance,data.def_chg)
plt.plot(x_array, log_org.predict_proba(x_array.reshape(-1,1))[:,1], color = 'g')
plt.plot([-100,3000],[0,0],linestyle = 'dashed', color = 'k')
plt.plot([-100,3000],[1,1],linestyle = 'dashed', color = 'k')
plt.xlim([-140,3000])
plt.xlabel('Balance')
plt.ylabel('Probability of default')
print('B0, B1 for linear: ', regr.intercept_, regr.coef_[0])
print('B0, B1 for logistic: ', log_org.intercept_[0], log_org.coef_[0][0])
#model: Y = 3X + 4
#Generate a separate set of training data (25 points) and validation data (15 points).
pts=25
x_tra = np.linspace(-50,50,num = pts)
x_val = np.linspace(-50,50,num = 15)
B0=4
B1=3
y_act = B0 + B1*x_tra
y_val = B0 + B1*x_val
np.random.seed(123)
#add noise scaled to 25% of range to data
yrand = y_act + .25*(y_act.max()-y_act.min())*np.random.normal(size = pts)
yrand_val = y_val + .25*(y_val.max()-y_val.min())*np.random.normal(size = 15)
plt.figure(figsize=(5,15))
plt.subplot(311)
plt.scatter(x_tra,yrand)
plt.title('Training data (25 points)')
plt.ylabel('y')
plt.xlabel('x')
plt.grid(linestyle = 'dashed')
plt.subplot(312)
plt.scatter(x_val,yrand_val)
plt.title('Validation data (15 points)')
plt.ylabel('y')
plt.xlabel('x')
plt.grid(linestyle = 'dashed')
plt.subplot(313)
#Use a for loop to test adding different percentage strength of error
for i in range(100):
yrand_val = y_val + (i/100)*(y_val.max()-y_val.min())*np.random.normal(size = 15)
#calculate RSE
plt.plot(i/100,math.sqrt((((y_val-yrand_val)**2).sum())/(15-2)),marker='o',color = 'b')
plt.title('Error in testing data as a function of random strength')
plt.ylabel('RSE')
plt.xlabel('Percent Error in Testing Data')
plt.grid(linestyle = 'dashed')
#exact example shown in class
#model: Y = 3X + 4
#size of training data and scale of random noise
pts=25
noisescale=.25
x=np.linspace(-50,50,num=pts)
B0=4
B1=3
yactual=B0+B1*x
np.random.seed(123)
#add noise scaled to 25% of range to data
yrand=yactual+noisescale*(yactual.max()-yactual.min())*np.random.normal(size=pts)
#SLR
regr=linear_model.LinearRegression()
regr.fit(x.reshape(-1,1),yrand)
print('B0, B1: ',regr.intercept_, regr.coef_[0])
ypred = regr.predict(x.reshape(-1,1))
RSS = ((yrand - ypred)**2).sum()
print('RSS =', RSS)
RSE = math.sqrt(RSS/(pts - 2))
print ('RSE =', RSE)
TSS = ((yrand - yrand.mean())**2).sum()
Rsqr = (TSS-RSS)/TSS
print ('R^2 =', Rsqr)
SE_beta0 = math.sqrt((RSE**2)*(1/pts + x.mean()**2/((x - x.mean())**2).sum()))
SE_beta1 = math.sqrt(RSE**2 / ((x - x.mean())**2).sum())
print('95% confidence interval for B0:',[regr.intercept_ - 2*SE_beta0, regr.intercept_ + 2*SE_beta0])
print('95% confidence interval for B1:',[regr.coef_[0] - 2*SE_beta1,regr.coef_[0] + 2*SE_beta1])
t = (regr.coef_[0] - 0)/SE_beta1
p = stats.t.sf(np.abs(t), pts-2)*2
print('p value of B1:', p)
harvard = pd.read_csv('HCEPD_100K.csv')
regr2 = linear_model.LinearRegression()
regr2.fit(harvard[['mass','voc','e_lumo_alpha']],harvard.pce)
print(regr2.coef_)
print(regr2.intercept_)
# generate matrix X to make predictions of PCE over the X parameter space
pts=100
X=np.zeros((pts,3))
X[:,0]=np.linspace(harvard.mass.min(),harvard.mass.max(),pts)
X[:,1]=np.linspace(harvard.voc.min(),harvard.voc.max(),pts)
X[:,2]=np.linspace(harvard.e_lumo_alpha.min(),harvard.e_lumo_alpha.max(),pts)
# plot the predicted data
plt.figure(figsize=(5,10))
plt.subplot(311)
plt.scatter(harvard.mass,harvard.pce)
plt.plot(X[:,0],regr2.predict(X),color='red',lw=3)
plt.ylabel('PCE')
plt.xlabel('$mass$')
plt.subplot(312)
plt.scatter(harvard.voc,harvard.pce)
plt.plot(X[:,1],regr2.predict(X),color='red',lw=3)
plt.ylabel('PCE')
plt.xlabel('$VOC$')
plt.subplot(313)
plt.scatter(harvard.e_lumo_alpha,harvard.pce)
plt.plot(X[:,2],regr2.predict(X),color='red',lw=3)
plt.ylabel('PCE')
plt.xlabel('$E_{LUMO}$')
#TSS
TSS = 0
pce_mean = harvard.pce.mean()
for i in harvard.pce:
TSS += (i - pce_mean)**2
print('TSS = ', TSS)
#RSS
RSS = 0
for i in range(harvard.shape[0]):
RSS += (harvard.pce[i] - regr2.intercept_ - regr2.coef_[0]*harvard.mass[i]
- regr2.coef_[1]*harvard.voc[i] - regr2.coef_[2]*harvard.e_lumo_alpha[i])**2
print('RSS = ', RSS)
p = 3
n = harvard.shape[0]
F = ((TSS - RSS)/p)/(RSS/(n-p-1))
print('F-statistic = ', F)
#beta 1
RSS0 = 0
for i in range(n):
RSS0 += (harvard.pce[i] - regr2.intercept_ - regr2.coef_[1]*harvard.voc[i]
- regr2.coef_[2]*harvard.e_lumo_alpha[i])**2
F = (RSS0-RSS)/(RSS/(n-p-1))
p_value = stats.t.sf(np.abs(F), n-2)*2
print("p value of beta1:", p_value, "(note: 0.0 means < 1e-6)")
#beta 2
RSS0 = 0
for i in range(n):
    RSS0 += (harvard.pce[i] - regr2.intercept_ - regr2.coef_[0]*harvard.mass[i]
             - regr2.coef_[2]*harvard.e_lumo_alpha[i])**2
F = (RSS0-RSS)/(RSS/(n-p-1))
p_value = stats.t.sf(np.abs(F), n-2)*2
print("p value of beta2:", p_value, "(note: 0.0 means < 1e-6)")
#beta 3
RSS0 = 0
for i in range(n):
    RSS0 += (harvard.pce[i] - regr2.intercept_ - regr2.coef_[0]*harvard.mass[i]
             - regr2.coef_[1]*harvard.voc[i])**2
F = (RSS0-RSS)/(RSS/(n-p-1))
p_value = stats.t.sf(np.abs(F), n-2)*2
print("p value of beta3:", p_value, "(note: 0.0 means < 1e-6)")
#Create a bootstrap sampling fuction and return average MSE and beta
def bootstrap(data, num, iter_):
MSE_avg = 0
B1_avg = 0
B2_avg = 0
B3_avg = 0
for i in range(iter_):
#choose randomly with replacement
df = data.sample(num, replace = True)
#MLR
regr = linear_model.LinearRegression()
regr.fit(df[['mass','voc','e_lumo_alpha']],df.pce)
pred = regr.predict(df[['mass','voc','e_lumo_alpha']])
#MSE and beta
MSE = ((df.pce - pred)**2).mean()
MSE_avg += MSE
B1_avg += regr.coef_[0]
B2_avg += regr.coef_[1]
B3_avg += regr.coef_[2]
return [MSE_avg/iter_, B1_avg/iter_, B2_avg/iter_, B3_avg/iter_]
#collect these values to plot
MSE_list = []
B1_list = []
B2_list = []
B3_list = []
for i in range(1,51):
MSE_sublist = []
B1_sublist = []
B2_sublist = []
B3_sublist = []
for j in [100,1000,5000]:
bst = bootstrap(harvard, j, i)
MSE_sublist.append(bst[0])
B1_sublist.append(bst[1])
B2_sublist.append(bst[2])
B3_sublist.append(bst[3])
MSE_list.append(MSE_sublist)
B1_list.append(B1_sublist)
B2_list.append(B2_sublist)
B3_list.append(B3_sublist)
#MLR on original data set
x = range(1,51)
regr = linear_model.LinearRegression()
regr.fit(harvard[['mass','voc','e_lumo_alpha']],harvard.pce)
pred = regr.predict(harvard[['mass','voc','e_lumo_alpha']])
MSE = ((harvard.pce - pred)**2).mean()
#MSE
plt.figure(figsize=(5,4))
plt.subplot(111)
plt.plot(x, np.asarray(MSE_list)[:,0], label='size = 100')
plt.plot(x, np.asarray(MSE_list)[:,1], label='size = 1000')
plt.plot(x, np.asarray(MSE_list)[:,2], label='size = 5000')
plt.plot([-2,52],[MSE, MSE],color = 'k', linestyle = 'dashed', label='MLR on original data')
plt.xlim([-2,52])
plt.xlabel('Number of bootstrap samples ')
plt.ylabel('MSE')
plt.legend()
plt.grid()
#beta1
plt.figure(figsize=(16,4))
plt.subplot(131)
plt.plot(x, np.asarray(B1_list)[:,0], label='size = 100')
plt.plot(x, np.asarray(B1_list)[:,1], label='size = 1000')
plt.plot(x, np.asarray(B1_list)[:,2], label='size = 5000')
plt.plot([-2,52],[regr.coef_[0],regr.coef_[0]],color = 'k', linestyle = 'dashed', label='MLR on original data')
plt.xlim([-2,52])
plt.ylabel(chr(946)+'1')
plt.xlabel('Number of bootstrap samples ')
plt.legend()
plt.grid()
#beta2
plt.subplot(132)
plt.plot(x, np.asarray(B2_list)[:,0], label='size = 100')
plt.plot(x, np.asarray(B2_list)[:,1], label='size = 1000')
plt.plot(x, np.asarray(B2_list)[:,2], label='size = 5000')
plt.plot([-2,52],[regr.coef_[1],regr.coef_[1]],color = 'k', linestyle = 'dashed', label='MLR on original data')
plt.xlim([-2,52])
plt.ylabel(chr(946)+'2')
plt.xlabel('Number of bootstrap samples ')
plt.legend()
plt.grid()
#beta3
plt.subplot(133)
plt.plot(x, np.asarray(B3_list)[:,0], label='size = 100')
plt.plot(x, np.asarray(B3_list)[:,1], label='size = 1000')
plt.plot(x, np.asarray(B3_list)[:,2], label='size = 5000')
plt.plot([-2,52],[regr.coef_[2],regr.coef_[2]],color = 'k', linestyle = 'dashed', label='MLR on original data')
plt.xlim([-2,52])
plt.ylabel(chr(946)+'3')
plt.xlabel('Number of bootstrap samples ')
plt.legend()
plt.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: Part 3
Step3: In statistics, the p-value is the probability of observing a value at least as extreme as the one obtained, assuming H0 is true. When the p-value is small enough (below the desired significance level $\alpha$), such an extreme value has very low probability, though it can still occur; we therefore have grounds to reject the assumption that H0 is true and conclude in favour of Ha. In contrast, when the p-value is larger than $\alpha$, we can only say that we do not have enough evidence to reject H0.
Step4: If we ran three separate simple linear regressions, we could compare each $\beta_i$ this way. However, in a multiple linear regression, which considers all factors in one model, the $\beta_i$ are affected by one another, so they cannot be compared in the same way.
Step5: The F-statistic comes from a hypothesis test whose null hypothesis is that all $\beta_i = 0$ except $\beta_0$. When there is no relationship between the response and the predictors, we expect the F-statistic to take a value close to 1. If Ha (at least one $\beta_i$ is non-zero) is true, we expect F to be greater than 1. In this case F is larger than a hundred thousand, suggesting that at least one of the predictors (mass, voc, $E_{LUMO}$) must be related to PCE (the standard formulas are given after this list).
Step6: Part 5
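For reference, the quantities computed in the code above follow the standard formulas (ISLR notation):
$$TSS = \sum_i (y_i - \bar y)^2, \qquad RSS = \sum_i (y_i - \hat y_i)^2, \qquad F = \frac{(TSS - RSS)/p}{RSS\,/\,(n - p - 1)},$$
with $p = 3$ predictors (mass, voc, $E_{LUMO}$) and $n$ the number of rows in the Harvard dataset.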
|
5,438
|
<ASSISTANT_TASK:>
Python Code:
# imports / display plots in cell output
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
import pandas as pd
import seaborn as sns
import statsmodels
# Bayesian Model Selection (bor = .6240)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, positive learning rate, negative learning rate
models = ('Model 1', 'Model 2')
y_pos = np.arange(len(models))
pxp = [0.6880, 0.3120]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b1 (bandit_either) summary data
#b1 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b1_d100_table.csv')
b1 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b1_d100_table.csv')
b1 = b1.drop('subID', axis=1)
data = pd.DataFrame(b1)
data.describe()
# plot differences in payout
with sns.plotting_context('talk', font_scale=1.4):
sns.set_style("darkgrid")
ax = sns.pointplot(data=b1, x='condition', y='payout', palette = ['#FFD479','#D783FF'])
ax.figure.get_axes()[0].set_xticklabels(['bomb','gems'])
#ax.figure.savefig('b1_pointplot')
# Bayesian Model Selection (bor = .778e-21)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [1, 0, 0, 0]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b2 (bandit_either) summary data
#b2 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b2_d100_table.csv')
b2 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b2_d100_table.csv')
b2 = b2.drop('subID', axis=1)
data = pd.DataFrame(b2)
data.describe()
# plot preference for gems in terms of door probability
pDoor = b2[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
sns.set_style("darkgrid")
ax = sns.pointplot(data=pDoor, palette = ['#3DDA60','#fde73f','#62afea','#EF5050'])
ax.figure.get_axes()[0].set_xticklabels(['0.80','0.60','0.40','0.20'])
#ax.figure.savefig('b2_pointplot')
# Bayesian Model Selection (bor = .7427)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [0.3497, 0.1857, 0.1857, 0.2789]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b3 (bandit_either) summary data
#b3 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/clean/b3_d100_table.csv')
b3 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b3_d100_table.csv')
b3 = b3.drop('subID', axis=1)
data = pd.DataFrame(b3)
data.describe()
# plot preference for gems in terms of door probability
pDoor = b3[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
sns.set_style("darkgrid")
ax = sns.pointplot(data=pDoor, palette = ['#3DDA60','#fde73f','#62afea','#EF5050'])
ax.figure.get_axes()[0].set_xticklabels(['0.80','0.60','0.40','0.20'])
#ax.figure.savefig('b2_pointplot')
# regression of intergroup bias on model-based preference for gems
data = b3[['igbias','wGems']]
with sns.plotting_context('talk', font_scale=1.2):
ax = (sns.jointplot(x='igbias', y='wGems', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'weight on gems parameter'))
ax.savefig('b3_igbias_wGems')
# regression of intergroup bias on preference for gems
data = b3[['igbias','pGems']]
with sns.plotting_context('talk', font_scale=1.4):
ax = (sns.jointplot(x='igbias', y='pGems', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'preference for gems'))
#ax.savefig('b3_igbias_pGems')
# Bayesian Model Selection (bor = 9.7058e-11)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [2.4264e-11, 2.4264e-11, 2.4264e-11, 1.0000]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b4 (bandit_double) summary data
#b4 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b4_d100_table.csv')
b4 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b4_best_table.csv')
b4 = b4.drop('subID', axis=1)
data = pd.DataFrame(b4)
data.describe()
# Bayesian Model Selection (bor = .0052)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [0.0013, 0.0013, 0.7480, 0.2494]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b5 (bandit_either) summary data
#b5 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b5_d100_table.csv')
b5 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b5_d100_table.csv')
data = pd.DataFrame(b5)
data.describe()
# plot preference for gems in terms of door probability
pDoor = b5[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
sns.set_style("darkgrid")
ax = sns.pointplot(data=pDoor, palette = ['#3DDA60','#fde73f','#62afea','#EF5050'])
ax.figure.get_axes()[0].set_xticklabels(['0.80','0.60','0.40','0.20'])
ax.figure.savefig('b5_pointplot')
# regression of intergroup bias on preference for gems
data = b5[['igbias','pGems']]
with sns.plotting_context('talk', font_scale=1.4):
ax = (sns.jointplot(x='igbias', y='pGems', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'preference for gems'))
ax.savefig('b5_igbias_pGems')
# Bayesian Model Selection (bor = 4.61e-37)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [1, 0, 0, 0]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b6 (bandit_either) summary data
#b6 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b6_d25_table.csv')
b6 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b6_model_4_table.csv')
data = pd.DataFrame(b6)
#data.describe()
# plot preference for gems in terms of door probability
pDoor = b6[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
sns.set_style("darkgrid")
ax = sns.pointplot(data=pDoor, palette = ['#3DDA60','#fde73f','#62afea','#EF5050'])
ax.figure.get_axes()[0].set_xticklabels(['0.80','0.60','0.40','0.20'])
ax.figure.savefig('b6_pointplot')
# regression of behavioral 'preference for burn' on intergroup bias
data = b6[['pEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
ax = (sns.jointplot(x='igbias', y='pEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'preference for earning'))
ax.savefig('b6_pEarn_x_igbias')
# regression of 'preference for earn' parameter on intergroup bias
data = b6[['wEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
ax = (sns.jointplot(x='igbias', y='wEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'weight on earning parameter'))
ax.savefig('b6_wEarn_x_igbias')
# Bayesian Model Selection (bor = 1.410e-7)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [0.000001, 0.000001, 0.99999, 0.000001]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
# import post-mfit b5 (bandit_either) summary data
#b7 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b7_d25_table.csv')
b7 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b7_model_4_table.csv')
data = pd.DataFrame(b7)
data.describe()
# plot preference for gems in terms of door probability
pDoor = b7[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
sns.set_style("darkgrid")
ax = sns.pointplot(data=pDoor, palette = ['#3DDA60','#fde73f','#62afea','#EF5050'])
ax.figure.get_axes()[0].set_xticklabels(['0.80','0.60','0.40','0.20'])
ax.figure.savefig('b7_pointplot')
# regression of behavioral 'preference for burn' on intergroup bias
data = b7[['pEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
ax = (sns.jointplot(x='igbias', y='pEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'preference for earning'))
ax.savefig('b7_pEarn_x_igbias')
# regression of 'preference for earn' parameter on intergroup bias
data = b7[['wEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
ax = (sns.jointplot(x='igbias', y='wEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
.set_axis_labels('intergroup bias', 'weight on earning parameter'))
ax.savefig('b7_wEarn_x_igbias')
!pwd
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Experiment 1
Step2: Experiment 2
Step3: Experiment 2
Step4: Experiment 3
Step5: Experiment 3
Step6: Experiment 4
Step7: Experiment 4
Step8: Experiment 5
Step9: Experiment 6
Step10: Experiment 6
Step11: Experiment 7
Step12: Experiment 7
|
5,439
|
<ASSISTANT_TASK:>
Python Code:
def expect_value(k, p):
steps = [k / p / (k - i) for i in range(k)]
return sum(steps)
k = 10
ps = [1., .5, .33, .25, .2, .1]
count = np.vectorize(lambda p: expect_value(k, p), otypes=[float])(ps)
plt.scatter(ps, count)
plt.xlabel('Lion probability')
plt.ylabel('Purchase count')
count
def prob(N, k, p):
q = 1. - p
dynamic_table = np.zeros((N + 1) * (k + 1)).reshape(k + 1, N + 1)
for n in range(N + 1):
dynamic_table[0][n] = q ** n
    for n in range(1, N + 1):
for i in range(1, k + 1):
dynamic_table[i][n] = \
dynamic_table[i][n - 1] * (p * float(i) / k + q) + \
dynamic_table[i - 1][n - 1] * p * float(k - i + 1) / k
return dynamic_table[k]
N = 200
k = 10
plt.plot(prob(N, k, 1.), label='p = 1')
plt.plot(prob(N, k, 0.5), label='p = 0.5')
plt.plot(prob(N, k, 0.33), label='p = 0.33')
plt.ylabel('Probability')
plt.xlabel('Kinder surprises')
plt.legend()
purchase_prob = prob(150, 10, 0.33)
count = np.argwhere(purchase_prob >= 0.8).min()
count, purchase_prob[count]
def simulation(k, p):
lion_collection = set()
toy_type_dist = stats.bernoulli(p)
lion_dist = stats.randint(0, k)
purchaes_counter = 0
while len(lion_collection) < k:
purchaes_counter += 1
if toy_type_dist.rvs() == 1:
lion_collection.add(lion_dist.rvs())
return purchaes_counter
purchases = np.vectorize(lambda iteration: simulation(10, .33))(np.arange(10000))
plt.plot(sp.diff(prob(250, 10, 0.33)))
sns.distplot(purchases)
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import scipy as sp
import scipy.stats as stats
%matplotlib inline
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If every egg contained a lion cub, we would need to buy 29.29 eggs on average to complete the collection. But when only every third egg contains a lion cub, that number is already 88.76 eggs.
Step2: I have seen the coupon collector's problem for $p = 1$ worked out on Habr, but there everything is magically reduced to Stirling numbers of the second kind with a correction coefficient. I would rather not compute a deliberately larger number and risk trouble with large floats, and asymptotically there seems to be no gain, since an exact factorial value is needed here rather than an approximation. Since we have the nice formula above, we can solve it with simple dynamic programming (the recurrence is written out after this list).
Step3: To collect the whole set with probability ~80% at $p = 0.33$, we need to buy 115 eggs.
Step4: Numerical experiment
Step5: Key conclusions
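Two added notes, written in LaTeX math for clarity, on the formulas the code above implements. The expected number of eggs needed to collect all $k$ lions, when each egg holds a lion with probability $p$, is $E[N] = \sum_{i=0}^{k-1} \frac{k}{p\,(k-i)} = \frac{k}{p} H_k$; for $k = 10$ this gives $\approx 29.29$ at $p = 1$ and $\approx 88.76$ at $p = 0.33$, matching the numbers in Step1. The dynamic programme in prob(N, k, p) tracks $P_n(i)$, the probability of holding $i$ distinct lions after opening $n$ eggs; with $q = 1 - p$ it uses $P_n(0) = q^n$ and $P_n(i) = P_{n-1}(i)\left(\frac{p\,i}{k} + q\right) + P_{n-1}(i-1)\,\frac{p\,(k-i+1)}{k}$.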
|
5,440
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation
from IPython.display import HTML
font = {'size' : 15}
matplotlib.rc('font', **font)
m = 16
L = 2*np.pi
xi=np.fft.fftfreq(m)*m/(L/(2*np.pi))
print(xi)
from ipywidgets import widgets
from ipywidgets import interact, interactive
def plot_sine(wavenumber=4,grid_points=12,plot_sine=True):
"Plot sin(2*pi*p), sampled at m equispaced points."
x = np.linspace(0,1,grid_points+1); # grid
xf = np.linspace(0,1,1000) # fine grid
y = np.sin(wavenumber*np.pi*x)
yf = np.sin(wavenumber*np.pi*xf)
fig = plt.figure(figsize = (8, 6));
ax = fig.add_subplot(1,1,1);
if plot_sine:
ax.plot(xf, yf, 'r-', linewidth=2);
ax.plot(x, y, 'o-', lw=2)
interact(plot_sine, wavenumber=(-30,30,1),
grid_points=(5, 16, 1));
def rk3(u,xi,rhs):
y2 = u + dt*rhs(u,xi)
y3 = 0.75*u + 0.25*(y2 + dt*rhs(y2,xi))
u_new = 1./3 * u + 2./3 * (y3 + dt*rhs(y3,xi))
return u_new
from nodepy import rk
ssp33 = rk.loadRKM('SSP33')
print(ssp33.imaginary_stability_interval())
def rhs(u, xi, equation='KdV'):
uhat = np.fft.fft(u)
if equation == 'Burgers':
return -u*np.real(np.fft.ifft(1j*xi*uhat)) + np.real(np.fft.ifft(-xi**2*uhat))
elif equation == 'KdV':
return -u*np.real(np.fft.ifft(1j*xi*uhat)) - np.real(np.fft.ifft(-1j*xi**3*uhat))
# Grid
m = 256
L = 2*np.pi
x = np.arange(-m/2,m/2)*(L/m)
xi = np.fft.fftfreq(m)*m*2*np.pi/L
dt = 1.73/((m/2)**3)
A = 25; B = 16;
#u = 3*A**2/np.cosh(0.5*(A*(x+2.)))**2 + 3*B**2/np.cosh(0.5*(B*(x+1)))**2
#tmax = 0.006
# Try this one first:
u = 1500*np.exp(-10*(x+2)**2)
tmax = 0.005
uhat2 = np.abs(np.fft.fft(u))
num_plots = 50
nplt = np.floor((tmax/num_plots)/dt)
nmax = int(round(tmax/dt))
fig = plt.figure(figsize=(12,8))
axes = fig.add_subplot(211)
axes2 = fig.add_subplot(212)
line, = axes.plot(x,u,lw=3)
line2, = axes2.semilogy(xi,uhat2)
xi_max = np.max(np.abs(xi))
axes2.semilogy([xi_max/2.,xi_max/2.],[1.e-6,4e8],'--r')
axes2.semilogy([-xi_max/2.,-xi_max/2.],[1.e-6,4e8],'--r')
axes.set_xlabel(r'$x$',fontsize=30)
axes2.set_xlabel(r'$\xi$',fontsize=30)
plt.tight_layout()
plt.close()
frames = [u.copy()]
tt = [0]
uuhat = [uhat2]
for n in range(1,nmax+1):
u_new = rk3(u,xi,rhs)
u = u_new.copy()
t = n*dt
# Plotting
if np.mod(n,nplt) == 0:
frames.append(u.copy())
tt.append(t)
uhat2 = np.abs(np.fft.fft(u))
uuhat.append(uhat2)
def plot_frame(i):
line.set_data(x,frames[i])
power_spectrum = np.abs(uuhat[i])**2
line2.set_data(np.sort(xi),power_spectrum[np.argsort(xi)])
axes.set_title('t= %.2e' % tt[i])
axes.set_xlim((-np.pi,np.pi))
axes.set_ylim((-100,3000))
anim = matplotlib.animation.FuncAnimation(fig, plot_frame,
frames=len(frames), interval=100)
HTML(anim.to_jshtml())
def rhs(u, xi, filtr, equation='KdV'):
uhat = np.fft.fft(u)
if equation == 'Burgers':
return -u*np.real(np.fft.ifft(1j*xi*uhat)) \
+ np.real(np.fft.ifft(-xi**2*uhat))
elif equation == 'KdV':
return -u*np.real(np.fft.ifft(1j*xi*uhat*filtr)) \
- np.real(np.fft.ifft(-1j*xi**3*uhat))
def rk3(u,xi,rhs,filtr):
y2 = u + dt*rhs(u,xi,filtr)
y3 = 0.75*u + 0.25*(y2 + dt*rhs(y2,xi,filtr))
u_new = 1./3 * u + 2./3 * (y3 + dt*rhs(y3,xi,filtr))
return u_new
# Grid
m = 128
L = 2*np.pi
x = np.arange(-m/2,m/2)*(L/m)
xi = np.fft.fftfreq(m)*m*2*np.pi/L
filtr = np.ones_like(xi)
xi_max = np.max(np.abs(xi))
filtr[np.where(np.abs(xi)>xi_max*2./3)] = 0.
dt = 1.73/((m/2)**3)
A = 25; B = 16;
u = 3*A**2/np.cosh(0.5*(A*(x+2.)))**2 + 3*B**2/np.cosh(0.5*(B*(x+1)))**2
#u = 1500*np.exp(-10*(x+2)**2)
tmax = 0.006
uhat2 = np.abs(np.fft.fft(u))
num_plots = 50
nplt = np.floor((tmax/num_plots)/dt)
nmax = int(round(tmax/dt))
fig = plt.figure(figsize=(12,8))
axes = fig.add_subplot(211)
axes2 = fig.add_subplot(212)
line, = axes.plot(x,u,lw=3)
line2, = axes2.semilogy(xi,uhat2)
axes2.semilogy([xi_max/2.,xi_max/2.],[1.e-6,4e8],'--r')
axes2.semilogy([-xi_max/2.,-xi_max/2.],[1.e-6,4e8],'--r')
axes.set_xlabel(r'$x$',fontsize=30)
axes2.set_xlabel(r'$\xi$',fontsize=30)
plt.tight_layout()
plt.close()
frames = [u.copy()]
tt = [0]
uuhat = [uhat2]
for n in range(1,nmax+1):
u_new = rk3(u,xi,rhs,filtr)
u = u_new.copy()
t = n*dt
# Plotting
if np.mod(n,nplt) == 0:
frames.append(u.copy())
tt.append(t)
uhat2 = np.abs(np.fft.fft(u))
uuhat.append(uhat2)
def plot_frame(i):
line.set_data(x,frames[i])
power_spectrum = np.abs(uuhat[i])**2
line2.set_data(np.sort(xi),power_spectrum[np.argsort(xi)])
axes.set_title('t= %.2e' % tt[i])
axes.set_xlim((-np.pi,np.pi))
axes.set_ylim((-100,3000))
anim = matplotlib.animation.FuncAnimation(fig, plot_frame,
frames=len(frames), interval=20)
HTML(anim.to_jshtml())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The FFT, aliasing, and filtering
Step2: As you can see, the return vector starts with the nonnegative wavenumbers, followed by the negative wavenumbers. It may seem strange to you that the range of wavenumbers returned is not symmetric; in the case above, it includes $-8$ but not $+8$. This apparent asymmetry can be explained once one understands the phenomenon known as aliasing.
Step3: Exercise
Step4: Let's check the size of the imaginary axis interval contained in this method's absolute stability region
Step5: Now we'll go ahead and implement our solution, making sure to set the time step according to the condition above.
Step6: In the output, we're plotting the solution (top plot) and its power spectrum ($|\hat{u}|^2$) (bottom plot). There are a lot of interesting things to say about the solution, but for now let's focus on the Fourier transform. Notice how the wavenumbers present in the solution remain in the lower half of those representable on the grid (this region is delimited by the dashed red lines). Because of this, no aliasing occurs.
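A small added numerical check of the aliasing statement in Step2 (a sketch for illustration, not part of the original notebook): on an $m$-point equispaced grid, two modes whose wavenumbers differ by $m$ take exactly the same values, so they cannot be told apart.
# Added sketch: wavenumbers k and k + m are indistinguishable on an m-point grid.
import numpy as np
m = 16
x = 2*np.pi*np.arange(m)/m                                  # m equispaced points on [0, 2*pi)
k = 3
print(np.allclose(np.exp(1j*k*x), np.exp(1j*(k + m)*x)))    # True
print(np.allclose(np.sin(k*x), np.sin((k - m)*x)))          # True: sin(3x) and sin(-13x) agree on the grid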
|
5,441
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
## Your code here
from collections import Counter
counter = Counter(int_words)
train_words = []
t = 1e-5
total_words = len(int_words)
rands = np.power(np.random.uniform(size=len(int_words)), 2.0)
for i, word in enumerate(int_words):
if t / counter[word] * total_words > rands[i]:
train_words.append(word)
print(len(train_words))
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
r = np.random.randint(1, window_size + 1)
start = max(idx - r, 0)
stop = idx + r
return words[start: idx] + words[idx + 1: stop + 1]
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words) // batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, [None], name="inputs")
labels = tf.placeholder(tf.int32, [None, None], name="labels")
n_vocab = len(int_to_vocab)
n_embedding = 400 # Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform([n_vocab, n_embedding], -1, 1), name="embedding")
embed = tf.nn.embedding_lookup(embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
# create softmax weight matrix here
softmax_w = tf.Variable(
tf.truncated_normal([n_vocab, n_embedding])
)
# create softmax biases here
softmax_b = tf.Variable(tf.zeros([n_vocab]))
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(
softmax_w, softmax_b,
labels, embed,
n_sampled, n_vocab
)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
import random
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function, by the way, which helps save memory. A quick peek at one batch appears after this list.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
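A quick added illustration of what get_batches yields: each input word is repeated once per target word drawn from its window. It only assumes the get_batches and get_target functions defined in the code above and uses a toy list of integer "words".
# Added sketch: inspect one batch produced by get_batches on a toy word list.
toy_words = list(range(20))                        # stand-in for integer-encoded words
x, y = next(get_batches(toy_words, batch_size=10, window_size=3))
print(list(zip(x, y))[:10])                        # (input, target) pairs; inputs repeat per target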
|
5,442
|
<ASSISTANT_TASK:>
Python Code:
#импортируем библиотеки
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.cluster import DBSCAN
plt.figure(figsize=(12, 12))
n_samples = 2300
random_state = 220
X, y = make_blobs(n_samples=n_samples, random_state=random_state, centers=7)
# Evenly distributed clusters
y_pred = KMeans(n_clusters=7, random_state=random_state, n_jobs = -1).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Равномерное распределение кластеров")
# Point distribution stretched along one of the axes
transformation = [[0.70834549, -0.563667341], [-0.30887718, 0.75253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=7, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Удлиненные по оси")
# Clusters with different variance
X_varied, y_varied = make_blobs(n_samples=n_samples, centers=7,
cluster_std=[1.0, 2.5, 0.5, 3, 0.7, 0.1, 2.3],
random_state=random_state)
y_pred = KMeans(n_clusters=7, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Разная дисперсия точек")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:2000], X[y == 1][:500], X[y == 2][:400], X[y == 3][:300],
X[y == 4][:200],X[y == 5][:120],X[y == 6][:42]))
y_pred = KMeans(n_clusters=7,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Разное количество точек в кластерах")
plt.show()
y_pred = KMeans(n_clusters=7, random_state=random_state, n_jobs = -1).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Равномерное распределение кластеров")
plt.show()
db = DBSCAN(eps=0.35,min_samples=5)
y_pred = db.fit_predict(X_aniso)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As we can see, the assignment of points to clusters turned out to be quite reasonable despite using the default parameters, except for the second case, where things really are somewhat non-obvious. Note, however, that we are looking at a fairly simple two-dimensional case; other clustering algorithms (sklearn offers quite a few) may give different and somewhat better results.
Step2: http
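The last code cell above computes DBSCAN labels for the anisotropic data but never displays them; a minimal added plotting sketch, reusing the X_aniso and y_pred variables from that cell, could look like the following (DBSCAN marks noise points with the label -1).
# Added sketch: visualize the DBSCAN result from the last cell above.
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("DBSCAN on the anisotropic data (label -1 = noise)")
plt.show()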
|
5,443
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
5,444
|
<ASSISTANT_TASK:>
Python Code:
#include some package which we use later on
import numpy as np
#test np.ar -> tab
a = np.array([1,2,3,4])
#test np.array -> shift-tab or np.array?
1+2
3+4
10/2
print(5+2)
3+2
a = 5+2
b = 9
a/b
def sum(a,b): #indent is important in Python!
return a+b
sum(4,4)
def sub(arg1,arg2):
return arg1-arg2
def calc(f, a, b):
return f(a,b)
#functions are first level objects, e.g., can be passed as argument to another function
print('sum ', calc(sum, a, b))
print('sub', calc(sub, a, b))
#array
arr = [1,2,3,4]
#maps aka dictionaries/dicts
dictionary = { 'a': 'Alpha', 'b': 'Beta'}
#array transformation
arr2 = [ a * 2 for a in arr]
dict2 = { k : v.upper() for k,v in dictionary.items()}
print(arr2)
print(dict2)
if a < 5:
print ('small')
else:
print ('large')
c = 'small' if a < 5 else 'large'
c
#what else: generators, iterators, classes, tuples, ...
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interactive Python basics
|
5,445
|
<ASSISTANT_TASK:>
Python Code:
!pygmentize message-dumper.yaml
!kubectl apply -f message-dumper.yaml
!pygmentize broker.yaml
!kubectl create -f broker.yaml
!pygmentize trigger.yaml
!kubectl apply -f trigger.yaml
!pygmentize sklearn-logging.yaml
!kubectl apply -f sklearn-logging.yaml
CLUSTER_IPS=!(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
CLUSTER_IP=CLUSTER_IPS[0]
print(CLUSTER_IP)
SERVICE_HOSTNAMES=!(kubectl get inferenceservice sklearn-iris -o jsonpath='{.status.url}' | cut -d "/" -f 3)
SERVICE_HOSTNAME=SERVICE_HOSTNAMES[0]
print(SERVICE_HOSTNAME)
import requests
def predict(X, name, svc_hostname, cluster_ip):
formData = {
'instances': X
}
headers = {}
headers["Host"] = svc_hostname
res = requests.post('http://'+cluster_ip+'/v1/models/'+name+':predict', json=formData, headers=headers)
if res.status_code == 200:
return res.json()
else:
print("Failed with ",res.status_code)
return []
predict([[6.8, 2.8, 4.8, 1.4]],"sklearn-iris",SERVICE_HOSTNAME,CLUSTER_IP)
!kubectl logs $(kubectl get pod -l serving.knative.dev/configuration=message-dumper -o jsonpath='{.items[0].metadata.name}') user-container
!kubectl delete -f sklearn-logging.yaml
!kubectl delete -f trigger.yaml
!kubectl delete -f message-dumper.yaml
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a channel broker.
Step2: Create a Knative trigger to pass events to the message logger.
Step3: Create an sklearn model with associated logger to push events to the message logger URL.
|
5,446
|
<ASSISTANT_TASK:>
Python Code:
s = 'Hello world!'
print(s)
print("length is", len(s))
us = 'Hello 世界!'
print(us)
print("length is", len(us))
bs = s.encode('utf-8')
print(bs)
print("length is", len(bs))
bus = us.encode('utf-8')
print(bus)
print("length is", len(bus))
print(bs.decode('utf-8'))
print(bus.decode('utf-8'))
num = 258
print(num.to_bytes(2, "big"))
print(num.to_bytes(2, "little"))
print(num.to_bytes(4, "big"))
print(num.to_bytes(4, "little"))
import struct
x = 256
print("Network endianess")
print(struct.pack('!h', x))
print("Little endian")
print(struct.pack('<h', x))
print("Big endian")
print(struct.pack('>h', x))
print("Native endianess")
print(struct.pack('=h', x))
bx = struct.pack('!h', x)
print(struct.unpack('!h', bx))
print(struct.unpack('<h', bx))
print(struct.unpack('!h', bx)[0])
print(struct.unpack('<h', bx)[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now encode both strings to bytes.
Step2: Decode back to strings.
Step3: Big Endian vs Little Endian
Step4: struct package
Step5: struct.pack(fmt, v1, v2, …)
Step6: struct.unpack(fmt, buffer)
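A small added example (not from the original notebook) of packing several fields with one format string and checking the record size with struct.calcsize; the field layout here is made up purely for illustration.
# Added sketch: multi-field pack/unpack in network byte order.
import struct
fmt = '!HIf'                            # unsigned short, unsigned int, float
packed = struct.pack(fmt, 7, 1024, 3.5)
print(packed, struct.calcsize(fmt))     # 2 + 4 + 4 = 10 bytes
print(struct.unpack(fmt, packed))       # (7, 1024, 3.5)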
|
5,447
|
<ASSISTANT_TASK:>
Python Code:
# import libraries
from __future__ import division
import numpy as np
import os
import matplotlib.pyplot as plt
from pyphysio.tests import TestData
%matplotlib inline
# import all pyphysio classes and methods
import pyphysio as ph
# import data and creating a signal
ecg_data = TestData.ecg()
fsamp = 2048
ecg = ph.EvenlySignal(values = ecg_data, sampling_freq = fsamp, signal_type = 'ecg')
# Step 1: Filtering and preprocessing
# (optional) IIR filtering : remove high frequency noise
ecg = ph.IIRFilter(fp=45, fs = 50, ftype='ellip')(ecg)
# normalization : normalize data
ecg = ph.Normalize(norm_method='standard')(ecg)
# resampling : increase the sampling frequency by cubic interpolation
ecg = ecg.resample(fout=4096, kind='cubic')
fsamp = 4096
# Step 2: Information Extraction
ibi = ph.BeatFromECG()(ecg)
# (optional) edit IBI
# ibi_ok = ph.Annotate(ecg, ibi)()
# Step 3: Computation of physiological indicators
# create fake label
label = np.zeros(1200)
label[300:600] = 1
label[900:1200] = 2
label = ph.EvenlySignal(label, sampling_freq = 10, signal_type = 'label')
# define a list of indicators we want to compute
hrv_indicators = [ph.Mean(name='RRmean'), ph.StDev(name='RRstd'), ph.RMSSD(name='rmsSD')]
t_start = [0.5, 15, 98.7]
t_stop = [5, 21, 110.4]
#custom windows
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators)
#custom windows
label_segments = ph.LabelSegments(labels=label)
indicators, col_names = ph.fmap(label_segments, hrv_indicators, ibi)
print(indicators[:, :4])
t_start = [0.5, 15, 78.7]
t_stop = [5, 21, 110.4]
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
plt.figure()
label.plot() #plot the label signal
plt.vlines(t_start, 0, 2, 'g') #plot the start of the segments, green vertical lines
plt.vlines(t_stop, 0, 2, 'r') #plot the end of the segments, red vertical lines
t_start = [0.5, 15, 78.7]
t_stop = [10, 21, 110.4]
# drop_mixed = False --> keep also the segments belonging to different experiment sessions
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label, drop_mixed=False)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
t_start = [0.5, 15, 78.7]
t_stop = [10, 21, 130.4] # endo of the last segments has been changed: 110.4 --> 130.40
# drop_mixed = False --> keep also the segments belonging to different experiment sessions
# drop_cut = True (default) --> drop the segments in which the signal ends before the end of the segment
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label, drop_mixed=False)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
t_start = [0.5, 15, 78.7]
t_stop = [10, 21, 130.4] # endo of the last segments has been changed: 110.4 --> 130.40
# drop_mixed = False --> keep also the segments belonging to different experiment sessions
# drop_cut = False --> keep also the segments in which the signal ends before the end of the segment
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label, drop_mixed=False, drop_cut=False)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
from os.path import expanduser
home = expanduser("~") # data will be saved in the user's home directory
print(home)
#ibi.to_csv(home+'/IBI.csv')
#ibi.to_pickle(home+'/IBI.pkl')
#ibi2 = ph.from_pickle('IBI.pkl')
#ax1 = plt.subplot(211)
#ibi.plot()
#plt.ylabel('ibi')
#plt.subplot(212, sharex=ax1)
#ibi2.plot()
#plt.ylabel('ibi2')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 4.1.1 Creation of custom segments
Step2: And then use the function CustomSegments to use the defined instants for the segmentation
Step3: Then the processing can be continued as usual
Step4: Note that we obtained three rows, corresponding to the three custom segments we defined above.
Step5: Then the processing can be continued as usual
Step6: Note that we obtained four rows, corresponding to the four different sessions of the experiment.
Step7: Therefore, the third column of the matrix obtained from the computation of the indicators will contain the information about the session the segment was taken from.
Step8: Note that we obtain only two segments and we miss the last one.
Step9: In this special case the function cannot assign the window to a specific experimental session.
Step10: 4.2 Use pickle to save and load signals
Step11: The following line saves the IBI signal into a csv file.
Step12: However, the process of loading the csv file back into a Signal to continue the processing is not straightforward.
|
5,448
|
<ASSISTANT_TASK:>
Python Code:
# list
my_list = [1, 4, 5, 9]
print(my_list)
type(my_list)
# accessing each element by index
print(my_list[2])
len(my_list)
# assigning new value
my_list[1] = 12
print(my_list)
# append an element at the end
my_list.append(7)
print(my_list)
help(list)
# String
my_name = 'Anne' # a string is also an (immutable) sequence of characters
my_name[2]
len(my_name)
# sequence string separated by space
seq = 'AAA TTT CCC GGG'
print(seq.split())
?str.split
?str.join
','.join(my_name)
# Sets
my_set = set([1, 2, 3, 3, 3, 4])
print(my_set)
len(my_set)
my_set.add(3) # adding an element that is already present leaves the set unchanged (sets hold unique items)
print(my_set)
my_set.remove(3)
print(my_set)
# set operation using union | or intersection &
my_first_set = set([1, 2, 4, 6, 8])
my_second_set = set([8, 9, 10])
my_first_set | my_second_set
my_first_set & my_second_set
# Dictionaries are collections of key/value pairs
my_dict = {'A': 'Adenine', 'C': 'Cytosine', 'T': 'Thymine', 'G': 'Guanine'}
print(my_dict)
my_dict['C']
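# Note (added for clarity): the next lookup uses a key that is not present,
# so it raises a KeyError; dict.get, used just below, is the safe alternative.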
my_dict['N']
?my_dict.get
my_dict.get('N', 'unknown')
print(my_dict)
len(my_dict)
type(my_dict)
'T' in my_dict
# Assign new key/value pair
my_dict['Y'] = 'Pyrimidine'
print(my_dict)
my_dict['Y'] = 'Cytosine or Thymine'
print(my_dict)
del my_dict['Y']
print(my_dict)
help(dict)
my_dict.keys()
list(my_dict.keys())
my_dict.values()
my_dict.items()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Session 1.4
Step2: Exercises 1.4.1
|
5,449
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tulipy as ti
ti.TI_VERSION
DATA = np.array([81.59, 81.06, 82.87, 83, 83.61,
83.15, 82.84, 83.99, 84.55, 84.36,
85.53, 86.54, 86.89, 87.77, 87.29])
def print_info(indicator):
print("Type:", indicator.type)
print("Full Name:", indicator.full_name)
print("Inputs:", indicator.inputs)
print("Options:", indicator.options)
print("Outputs:", indicator.outputs)
print_info(ti.sqrt)
ti.sqrt(DATA)
print_info(ti.sma)
ti.sma(DATA, period=5)
try:
ti.sma(DATA, period=-5)
except ti.InvalidOptionError:
print("Invalid Option!")
print_info(ti.bbands)
ti.bbands(DATA, period=5, stddev=2)
DATA2 = np.array([83.15, 82.84, 83.99, 84.55, 84.36])
# 'high' trimmed to DATA[-5:] == array([ 85.53, 86.54, 86.89, 87.77, 87.29])
ti.aroonosc(high=DATA, low=DATA2, period=2)
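# Illustrative follow-up (assumption: aroonosc exposes the same metadata
# attributes that print_info above reads for the other indicators):
print_info(ti.aroonosc)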
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Information about indicators is exposed as properties
Step2: Single outputs are returned directly. Indicators returning multiple outputs use
Step3: Invalid options will throw an InvalidOptionError
Step4: If inputs of differing sizes are provided, they are right-aligned and trimmed from the left
|
5,450
|
<ASSISTANT_TASK:>
Python Code:
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.inverse_sparse import mixed_norm, make_stc_from_dipoles
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.viz import (plot_sparse_source_estimates,
plot_dipole_locations, plot_dipole_amplitudes)
print(__doc__)
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
subjects_dir = data_path + '/subjects'
# Read noise covariance matrix
cov = mne.read_cov(cov_fname)
# Handling average file
condition = 'Left Auditory'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked.crop(tmin=0, tmax=0.3)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
alpha = 55 # regularization parameter between 0 and 100 (100 is high)
loose, depth = 0.2, 0.9 # loose orientation & depth weighting
n_mxne_iter = 10 # if > 1 use L0.5/L2 reweighted mixed norm solver
# if n_mxne_iter > 1 dSPM weighting can be avoided.
# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
depth=depth, fixed=True,
use_cps=True)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
method='dSPM')
# Compute (ir)MxNE inverse solution with dipole output
dipoles, residual = mixed_norm(
evoked, forward, cov, alpha, loose=loose, depth=depth, maxit=3000,
tol=1e-4, active_set_size=10, debias=True, weights=stc_dspm,
weights_min=8., n_mxne_iter=n_mxne_iter, return_residual=True,
return_as_dipoles=True)
plot_dipole_amplitudes(dipoles)
# Plot dipole location of the strongest dipole with MRI slices
idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles])
plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample',
subjects_dir=subjects_dir, mode='orthoview',
idx='amplitude')
# Plot dipole locations of all dipoles with MRI slices
for dip in dipoles:
plot_dipole_locations(dip, forward['mri_head_t'], 'sample',
subjects_dir=subjects_dir, mode='orthoview',
idx='amplitude')
ylim = dict(eeg=[-10, 10], grad=[-400, 400], mag=[-600, 600])
evoked.pick_types(meg=True, eeg=True, exclude='bads')
evoked.plot(ylim=ylim, proj=True, time_unit='s')
residual.pick_types(meg=True, eeg=True, exclude='bads')
residual.plot(ylim=ylim, proj=True, time_unit='s')
stc = make_stc_from_dipoles(dipoles, forward['src'])
solver = "MxNE" if n_mxne_iter == 1 else "irMxNE"
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
fig_name="%s (cond %s)" % (solver, condition),
opacity=0.1)
morph = mne.compute_source_morph(stc, subject_from='sample',
subject_to='fsaverage', spacing=None,
sparse=True, subjects_dir=subjects_dir)
stc_fsaverage = morph.apply(stc)
src_fsaverage_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
src_fsaverage = mne.read_source_spaces(src_fsaverage_fname)
plot_sparse_source_estimates(src_fsaverage, stc_fsaverage, bgcolor=(1, 1, 1),
fig_name="Morphed %s (cond %s)" % (solver,
condition), opacity=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run solver
Step2: Plot dipole activations
Step3: Plot residual
Step4: Generate stc from dipoles
Step5: View in 2D and 3D ("glass" brain like 3D plot)
Step6: Morph onto fsaverage brain and view
|
5,451
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import matplotlib.pyplot as plt
%matplotlib inline
try:
xrange = xrange
except:
xrange = range
env = gym.make('CartPole-v0')
gamma = 0.99
def discount_rewards(r):
    """take 1D float array of rewards and compute discounted reward"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(xrange(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
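# Quick illustrative check (not part of the original notebook): with gamma = 0.99,
# the rewards [0., 0., 1.] discount to [0.9801, 0.99, 1.0].
print(discount_rewards(np.array([0., 0., 1.])))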
class agent():
def __init__(self, lr, s_size,a_size,h_size):
#These lines established the feed-forward part of the network. The agent takes a state and produces an action.
self.state_in= tf.placeholder(shape=[None,s_size],dtype=tf.float32)
hidden = slim.fully_connected(self.state_in,h_size,biases_initializer=None,activation_fn=tf.nn.relu)
self.output = slim.fully_connected(hidden,a_size,activation_fn=tf.nn.softmax,biases_initializer=None)
self.chosen_action = tf.argmax(self.output,1)
        #The next six lines establish the training procedure. We feed the reward and chosen action into the network
#to compute the loss, and use it to update the network.
self.reward_holder = tf.placeholder(shape=[None],dtype=tf.float32)
self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32)
self.indexes = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action_holder
self.responsible_outputs = tf.gather(tf.reshape(self.output, [-1]), self.indexes)
self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.reward_holder)
tvars = tf.trainable_variables()
self.gradient_holders = []
for idx,var in enumerate(tvars):
placeholder = tf.placeholder(tf.float32,name=str(idx)+'_holder')
self.gradient_holders.append(placeholder)
self.gradients = tf.gradients(self.loss,tvars)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
self.update_batch = optimizer.apply_gradients(zip(self.gradient_holders,tvars))
tf.reset_default_graph() #Clear the Tensorflow graph.
myAgent = agent(lr=1e-2,s_size=4,a_size=2,h_size=8) #Load the agent.
total_episodes = 5000 #Set total number of episodes to train agent on.
max_ep = 999
update_frequency = 5
init = tf.global_variables_initializer()
# Launch the tensorflow graph
with tf.Session() as sess:
sess.run(init)
i = 0
total_reward = []
total_length = []
gradBuffer = sess.run(tf.trainable_variables())
for ix,grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
while i < total_episodes:
s = env.reset()
running_reward = 0
ep_history = []
for j in range(max_ep):
#Probabilistically pick an action given our network outputs.
a_dist = sess.run(myAgent.output,feed_dict={myAgent.state_in:[s]})
a = np.random.choice(a_dist[0],p=a_dist[0])
a = np.argmax(a_dist == a)
            s1,r,d,_ = env.step(a) #Take the chosen action in the environment and observe the reward.
ep_history.append([s,a,r,s1])
s = s1
running_reward += r
if d == True:
#Update the network.
ep_history = np.array(ep_history)
ep_history[:,2] = discount_rewards(ep_history[:,2])
feed_dict={myAgent.reward_holder:ep_history[:,2],
myAgent.action_holder:ep_history[:,1],myAgent.state_in:np.vstack(ep_history[:,0])}
grads = sess.run(myAgent.gradients, feed_dict=feed_dict)
for idx,grad in enumerate(grads):
gradBuffer[idx] += grad
if i % update_frequency == 0 and i != 0:
feed_dict= dictionary = dict(zip(myAgent.gradient_holders, gradBuffer))
_ = sess.run(myAgent.update_batch, feed_dict=feed_dict)
for ix,grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
total_reward.append(running_reward)
total_length.append(j)
break
#Update our running tally of scores.
if i % 100 == 0:
print(np.mean(total_reward[-100:]))
i += 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The Policy-Based Agent
Step3: Training the Agent
|
5,452
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
SIZE = 20
prob = np.random.uniform(low=0.0, high=1.0, size=SIZE)
prob = prob/np.sum(prob)
x = range(0,len(prob))
plt.figure(figsize=(10,2))
plt.bar(x, prob, 0.3)
plt.xticks(x, x)
plt.show()
result = np.zeros_like(prob)
for i in range(1000):
value = np.random.choice(range(len(prob)),p=prob)
result[value] += 1
plt.figure(figsize=(10,2))
plt.bar(x, result, 0.3)
plt.xticks(x, x)
plt.show()
def sample(a, temperature=1.0):
# helper function to sample an index from a probability array
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
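# Illustrative check (added): lowering the temperature sharpens the adjusted
# distribution towards the most likely index, while temperature 1.0 leaves it unchanged.
toy = np.array([0.2, 0.3, 0.5])
for temp in [1.0, 0.5, 0.1]:
    adj = np.exp(np.log(toy) / temp)
    adj /= np.sum(adj)
    print(temp, adj)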
result_temperature = np.zeros_like(prob)
for i in range(1000):
value = sample(prob,temperature=0.1)
result_temperature[value] += 1
plt.figure(figsize=(10,2))
plt.bar(x, result_temperature, 0.3)
plt.xticks(x, x)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's create a probability vector!
Step2: Using np.random.choice you can pick random numbers that follow the distribution described in the probability vector.
Step3: Now let's define a sample function that will do the same as np.random.choice, but will have a 'temperature' parameter.
Step4: You can randomly pick samples that respect the distribution (temperature = 1), behaving exactly like np.random.choice
|
5,453
|
<ASSISTANT_TASK:>
Python Code:
i = -7
j = 123
print(i, j)
x = 3.14159
y = -42.3
print(x * y)
k = 1.5e3
l = 3e-2
print(k)
print(l)
s = "ATGTCGTCTACAACACT"
t = 'Serine'
u = "It's a string with apostrophes"
v = """A string that extends
over multiple lines"""
print(v)
a = True
b = False
print(a, b)
z = None
print(z)
a = True
print(a, "is of", type(a))
i = -7
print(i, "is of", type(i))
x = 12.7893
print(x, "is of", type(x))
s = "ATGTCGTCTACAACACT"
print(s, "is of", type(s))
z = None
print(z, "is of", type(z))
print("Hi") # this will be ignored
# as will this
print("Bye")
# print "Never seen"
x = 4.5
y = 2
print('x', x, 'y', y)
print('addition x + y =', x + y)
print('subtraction x - y =', x - y)
print('multiplication x * y =', x * y)
print('division x / y =', x / y)
x = 4.5
y = 2
print('x', x, 'y', y)
print('division x / y =', x / y)
print('floored division x // y =', x // y)
print('modulus (remainder of x/y) x % y =', x % y)
print('exponentiation x ** y =', x ** y)
x = 13
y = 5
print('x * (2 + y) =', x * (2 + y))
print('(x * 2) + y =', (x * 2) + y)
print('x * 2 + y =', x * 2 + y)
13 + 5.0
float(3) + float(7)
int(3.14159) + 1
print('number' + str(3))
print("3/4 =", 3/4) # in Python 2, you would get 0
print("3.0/4 =", 3.0/4)
print("float(3)/4 =", float(3)/4)
x = 3
x += 1 # equivalent to x = x + 1
x
x = 2
y = 10
y *= x
y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Floats
Step2: Floating point numbers can also carry an <tt>e</tt> suffix that states which power of ten they operate at.
Step4: Strings
Step5: Booleans
Step6: The <tt>None</tt> object
Step7: Object type
Step8: Comments
Step9: Arithmetic
Step10: As usual in maths, division and multiplication have higher precedence than addition and subtraction, but arithmetic expressions can be grouped using parentheses to override the default precedence
Step11: You can mix (some) types in arithmetic expressions and python will apply rules as to the type of the result
Step12: You can force python to use a particular type by converting an expression explicitly, using helpful named functions
Step13: The addition operator + allows you also to concatenate strings together.
Step14: Division in Python 2 sometimes trips up new (and experienced!) programmers. If you divide 2 integers you will only get an integer result. If you want a floating point result you should explicitly cast at least one of the arguments to a <tt>float</tt>.
Step15: There are a few shortcut assignment statements to make modifying variables directly faster to type
|
5,454
|
<ASSISTANT_TASK:>
Python Code:
import chaospy
normal = chaospy.Normal(mu=2, sigma=2)
normal
samples = normal.sample(4, seed=1234)
samples
from matplotlib import pyplot
pyplot.hist(normal.sample(10000, seed=1234), 30)
pyplot.show()
normal.sample([2, 2], seed=1234)
import numpy
numpy.random.seed(1234)
normal.sample(4)
normal.pdf([-2, 0, 2])
q_loc = numpy.linspace(-4, 8, 200)
pyplot.plot(q_loc, normal.pdf(q_loc))
pyplot.show()
normal.cdf([-2, 0, 2])
pyplot.plot(q_loc, normal.cdf(q_loc))
pyplot.show()
normal.mom([0, 1, 2])
chaospy.approximate_moment(normal, [2])
(chaospy.E(normal), chaospy.Var(normal),
chaospy.Skew(normal), chaospy.Kurt(normal))
normal_trunc = chaospy.Trunc(normal, upper=4)
pyplot.plot(q_loc, normal_trunc.pdf(q_loc))
pyplot.show()
normal_trunc2 = chaospy.Trunc(normal, lower=-1, upper=5)
pyplot.plot(q_loc, normal_trunc2.pdf(q_loc))
pyplot.show()
normal_gamma = chaospy.J(chaospy.Normal(0, 1), chaospy.Gamma(1))
pyplot.rc("figure", figsize=[12, 4])
pyplot.subplot(131)
pyplot.title("random scatter")
pyplot.scatter(*normal_gamma.sample(1000, seed=1000), marker="x")
pyplot.subplot(132)
pyplot.title("probability density")
grid = numpy.mgrid[-3:3:100j, 0:4:100j]
pyplot.contourf(grid[0], grid[1], normal_gamma.pdf(grid), 50)
pyplot.subplot(133)
pyplot.title("cumulative distibution")
pyplot.contourf(grid[0], grid[1], normal_gamma.cdf(grid), 50)
pyplot.show()
pyplot.subplot(121)
pyplot.title("standard uniform")
u_samples = chaospy.Uniform(0, 1).sample(10000, seed=1234)
pyplot.hist(u_samples, 30)
pyplot.subplot(122)
pyplot.title("transformed normal")
q_samples = normal.inv(u_samples)
pyplot.hist(q_samples, 30)
pyplot.show()
pyplot.subplot(121)
pyplot.title("coupled samples")
pyplot.scatter(q_samples, u_samples)
pyplot.subplot(122)
pyplot.title("normal cumulative distribution")
pyplot.plot(q_loc, normal.cdf(q_loc))
pyplot.show()
pyplot.subplot(121)
pyplot.title("standard uniform")
uu_samples = chaospy.Uniform(0, 1).sample((2, 500), seed=1234)
pyplot.scatter(*uu_samples)
pyplot.subplot(122)
pyplot.title("transformed normal-gamma")
qq_samples = normal_gamma.inv(uu_samples)
pyplot.scatter(*qq_samples)
pyplot.show()
def cdf(x_loc, lo, up):
    """Cumulative distribution function."""
return (x_loc-lo)/(up-lo)
def lower(lo, up):
    """Lower bounds function."""
return lo
def upper(lo, up):
    """Upper bounds function."""
return up
user_distribution = chaospy.UserDistribution(
cdf=cdf, lower=lower, upper=upper, parameters=dict(lo=-1, up=1))
pyplot.subplot(131)
pyplot.title("binned random samples")
pyplot.hist(user_distribution.sample(10000), 30)
pyplot.subplot(132)
pyplot.title("probability density")
x_loc = numpy.linspace(-2, 2, 200)
pyplot.plot(x_loc, user_distribution.pdf(x_loc))
pyplot.subplot(133)
pyplot.title("cumulative distribution")
pyplot.plot(x_loc, user_distribution.cdf(x_loc))
pyplot.show()
def ppf(q_loc, lo, up):
    """Point percentile function."""
return q_loc*(up-lo)+lo
user_distribution = chaospy.UserDistribution(
cdf=cdf, ppf=ppf, parameters=dict(lo=-1, up=1))
def mom(k_loc, lo, up):
    """Raw statistical moment."""
return (up**(k_loc+1)-lo**(k_loc+1))/(k_loc+1)/(up-lo)
def ttr(k_loc, lo, up):
    """Three terms recurrence."""
return 0.5*up+0.5*lo, k_loc**2/(4*k_loc**2-1)*lo**2
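# Illustrative sketch (assumption: UserDistribution also accepts the optional
# `mom` and `ttr` keyword arguments described in the accompanying text):
user_distribution = chaospy.UserDistribution(
    cdf=cdf, ppf=ppf, mom=mom, ttr=ttr, parameters=dict(lo=-1, up=1))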
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The distribution have a few methods that the user can used, which has names
Step2: These can be used to create e.g. histograms
Step3: The input can be both be a integer, but also a sequence of integers. For
Step4: Random seed
Step5: Probability density function
Step6: Cumulative probability function
Step7: Statistical moments
Step8: Not all random variables have raw moment variables, but for these variables
Step9: See quadrature integration for more details on how this
Step10: See descriptive statistics for details on
Step11: and two-sided truncation
Step12: Multivariate variables
Step13: The multivariate variables have the same functionality as the univariate
Step14: Rosenblatt transformation
Step15: Note that u_samples and q_samples here consist of independently
Step16: This idea also generalizes to the multivariate case. There the mapping
Step20: User-defined distributions
Step21: The user-define distribution takes these functions, and a dictionary with the
Step22: The distribution can then be used in the same was as any other
Step24: Alternative, it is possible to define the same distribution using cumulative
Step26: In addition to the required fields, there are a few optional ones. These does
Step28: And three terms recurrence coefficients which is used by the method
|
5,455
|
<ASSISTANT_TASK:>
Python Code:
!pip install -qq git+git://github.com/lindermanlab/ssm-jax-refactor.git
try:
import ssm
except ModuleNotFoundError:
%pip install -qq ssm
import ssm
import jax.numpy as np
import jax.random as jr
import jax.experimental.optimizers as optimizers
from jax import jit, value_and_grad, vmap
try:
from tqdm.auto import trange
except ModuleNotFoundError:
%pip install -qq tqdm
from tqdm.auto import trange
import matplotlib.pyplot as plt
try:
from tensorflow_probability.substrates import jax as tfp
except ModuleNotFoundError:
%pip install -qq tensorflow-probability
from tensorflow_probability.substrates import jax as tfp
try:
from ssm.lds.models import GaussianLDS, PoissonLDS
except ModuleNotFoundError:
%pip install -qq ssm
from ssm.lds.models import GaussianLDS, PoissonLDS
from ssm.distributions.linreg import GaussianLinearRegression
from ssm.utils import random_rotation
from ssm.plots import plot_dynamics_2d
from matplotlib.gridspec import GridSpec
def plot_emissions(states, data):
latent_dim = states.shape[-1]
emissions_dim = data.shape[-1]
num_timesteps = data.shape[0]
plt.figure(figsize=(8, 6))
gs = GridSpec(2, 1, height_ratios=(1, emissions_dim / latent_dim))
# Plot the continuous latent states
lim = abs(states).max()
plt.subplot(gs[0])
for d in range(latent_dim):
plt.plot(states[:, d] + lim * d, "-")
plt.yticks(np.arange(latent_dim) * lim, ["$x_{}$".format(d + 1) for d in range(latent_dim)])
plt.xticks([])
plt.xlim(0, num_timesteps)
plt.title("Sampled Latent States")
lim = abs(data).max()
plt.subplot(gs[1])
for n in range(emissions_dim):
plt.plot(data[:, n] - lim * n, "-k")
plt.yticks(-np.arange(emissions_dim) * lim, ["$y_{{ {} }}$".format(n + 1) for n in range(emissions_dim)])
plt.xlabel("time")
plt.xlim(0, num_timesteps)
plt.title("Sampled Emissions")
plt.tight_layout()
def plot_emissions_poisson(states, data):
latent_dim = states.shape[-1]
emissions_dim = data.shape[-1]
num_timesteps = data.shape[0]
plt.figure(figsize=(8, 6))
gs = GridSpec(2, 1, height_ratios=(1, emissions_dim / latent_dim))
# Plot the continuous latent states
lim = abs(states).max()
plt.subplot(gs[0])
for d in range(latent_dim):
plt.plot(states[:, d] + lim * d, "-")
plt.yticks(np.arange(latent_dim) * lim, ["$z_{}$".format(d + 1) for d in range(latent_dim)])
plt.xticks([])
plt.xlim(0, time_bins)
plt.title("Sampled Latent States")
lim = abs(data).max()
plt.subplot(gs[1])
plt.imshow(data.T, aspect="auto", interpolation="none")
plt.xlabel("time")
plt.xlim(0, time_bins)
plt.yticks(ticks=np.arange(emissions_dim))
# plt.ylabel("Neuron")
plt.title("Sampled Emissions (Counts / Time Bin)")
plt.tight_layout()
plt.colorbar()
def plot_dynamics(lds, states):
q = plot_dynamics_2d(
lds._dynamics.weights,
bias_vector=lds._dynamics.bias,
mins=states.min(axis=0),
maxs=states.max(axis=0),
color="blue",
)
plt.plot(states[:, 0], states[:, 1], lw=2, label="Latent State")
plt.plot(states[0, 0], states[0, 1], "*r", markersize=10, label="Initial State")
plt.xlabel("$z_1$")
plt.ylabel("$z_2$")
plt.title("Latent States & Dynamics")
plt.legend(bbox_to_anchor=(1, 1))
# plt.show()
def extract_trial_stats(trial_idx, posterior, all_data, all_states, fitted_lds, true_lds):
# Posterior Mean
Ex = posterior.mean()[trial_idx]
states = all_states[trial_idx]
data = all_data[trial_idx]
# Compute the data predictions
C = fitted_lds.emissions_matrix
d = fitted_lds.emissions_bias
Ey = Ex @ C.T + d
Covy = C @ posterior.covariance()[trial_idx] @ C.T
# basically recover the "true" input to the Poisson GLM
Ey_true = states @ true_lds.emissions_matrix.T + true_lds.emissions_bias
return states, data, Ex, Ey, Covy, Ey_true
def compare_dynamics(Ex, states, data):
# Plot
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
q = plot_dynamics_2d(
true_lds._dynamics.weights,
bias_vector=true_lds._dynamics.bias,
mins=states.min(axis=0),
maxs=states.max(axis=0),
color="blue",
axis=axs[0],
)
axs[0].plot(states[:, 0], states[:, 1], lw=2)
axs[0].plot(states[0, 0], states[0, 1], "*r", markersize=10, label="$z_{init}$")
axs[0].set_xlabel("$z_1$")
axs[0].set_ylabel("$z_2$")
axs[0].set_title("True Latent States & Dynamics")
q = plot_dynamics_2d(
fitted_lds._dynamics.weights,
bias_vector=fitted_lds._dynamics.bias,
mins=Ex.min(axis=0),
maxs=Ex.max(axis=0),
color="red",
axis=axs[1],
)
axs[1].plot(Ex[:, 0], Ex[:, 1], lw=2)
axs[1].plot(Ex[0, 0], Ex[0, 1], "*r", markersize=10, label="$z_{init}$")
axs[1].set_xlabel("$z_1$")
axs[1].set_ylabel("$z_2$")
axs[1].set_title("Simulated Latent States & Dynamics")
plt.tight_layout()
# plt.show()
def compare_smoothened_predictions(Ey, Ey_true, Covy, data):
data_dim = data.shape[-1]
plt.figure(figsize=(15, 6))
plt.plot(Ey_true + 10 * np.arange(data_dim))
plt.plot(Ey + 10 * np.arange(data_dim), "--k")
for i in range(data_dim):
plt.fill_between(
np.arange(len(data)),
10 * i + Ey[:, i] - 2 * np.sqrt(Covy[:, i, i]),
10 * i + Ey[:, i] + 2 * np.sqrt(Covy[:, i, i]),
color="k",
alpha=0.25,
)
plt.xlabel("time")
plt.ylabel("data and predictions (for each neuron)")
plt.plot([0], "--k", label="Predicted") # dummy trace for legend
plt.plot([0], "-k", label="True")
plt.legend(loc="upper right")
# plt.show()
# Some parameters to define our model
emissions_dim = 5 # num_observations
latent_dim = 2
seed = jr.PRNGKey(0)
# Initialize our true Poisson LDS model
true_lds = PoissonLDS(num_latent_dims=latent_dim, num_emission_dims=emissions_dim, seed=seed)
import warnings
num_trials = 5
time_bins = 200
# catch annoying warnings of tfp Poisson sampling
rng = jr.PRNGKey(0)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
all_states, all_data = true_lds.sample(key=rng, num_steps=time_bins, num_samples=num_trials)
plot_emissions_poisson(all_states[0], all_data[0])
plt.savefig("poisson-hmm-data.pdf")
plt.savefig("poisson-hmm-data.png")
latent_dim = 2
seed = jr.PRNGKey(32) # NOTE: different seed!
test_lds = PoissonLDS(num_emission_dims=emissions_dim, num_latent_dims=latent_dim, seed=seed)
rng = jr.PRNGKey(10)
elbos, fitted_lds, posteriors = test_lds.fit(all_data, method="laplace_em", rng=rng, num_iters=25)
# NOTE: you could also call the laplace_em routine directly like this
# from ssm.inference.laplace_em import laplace_em
# elbos, fitted_lds, posteriors = laplace_em(rng,
# test_lds,
# all_data,
# num_iters=25,
# laplace_mode_fit_method="BFGS")
plt.plot(elbos)
plt.show()
num_emissions_channels_to_view = 5
num_trials_to_view = 1
# Ex is expected hidden states, Ey is expected visible values
for trial_idx in range(num_trials_to_view):
states, data, Ex, Ey, Covy, Ey_true = extract_trial_stats(
trial_idx, posteriors, all_data, all_states, fitted_lds, true_lds
)
compare_dynamics(Ex, states, data)
plt.savefig("poisson-hmm-dynamics.pdf")
plt.savefig("poisson-hmm-dynamics.png")
plt.show()
compare_smoothened_predictions(
Ey[:, :num_emissions_channels_to_view],
Ey_true[:, :num_emissions_channels_to_view],
Covy,
data[:, :num_emissions_channels_to_view],
)
plt.savefig("poisson-hmm-trajectory.pdf")
plt.savefig("poisson-hmm-trajectory.png")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports and Plotting Functions
Step2: Sample some synthetic data from the Poisson LDS
Step3: Inference
|
5,456
|
<ASSISTANT_TASK:>
Python Code:
print las._text
lasio.ExcelConverter(las).write('example.xlsx')
import pandas
xls_header_sheet = pandas.read_excel('example.xlsx', sheetname='Header')
xls_header_sheet
xls_data_sheet = pandas.read_excel('example.xlsx', sheetname='Curves')
xls_data_sheet
converter = lasio.ExcelConverter(las)
type(converter.workbook)
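# Illustrative follow-up (assumption: the returned object is a standard
# openpyxl Workbook, so it can be modified and saved directly):
converter.workbook.save('example_modified.xlsx')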
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now use the ExcelConverter object to produce an Excel spreadsheet
Step2: we can import this spreadsheet back into Python directly using pandas
Step3: The spreadsheet has two sheets, one called "Header" with the header information from each section
Step4: and another called "Curves" containing the data itself
Step5: If you want to further modify the openpyxl.Workbook object, you can access it like so
|
5,457
|
<ASSISTANT_TASK:>
Python Code:
import time
import sys
import random
from pybel.utils import get_version
from pybel.struct.mutation import infer_child_relations
from pybel_tools.visualization import *
from pybel.examples.statin_example import statin_graph, hmgcr_inhibitor, hmgcr, ec_11134
print(time.asctime())
print(sys.version)
# Set seed for visualization
random.seed(127)
print(get_version())
to_jupyter(statin_graph)
infer_child_relations(statin_graph, hmgcr_inhibitor)
to_jupyter(statin_graph)
infer_child_relations(statin_graph, ec_11134)
to_jupyter(statin_graph)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Environment
Step2: Dependencies
Step3: Example Graph
Step4: Propagation on Chemical Hierarchy
Step5: Propagation on Protein Hierarchy
|
5,458
|
<ASSISTANT_TASK:>
Python Code:
# To visualize plots in the notebook
%matplotlib inline
# Imported libraries
import csv
import random
import matplotlib
import matplotlib.pyplot as plt
import pylab
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
# Define the logistic function
def logistic(x):
p = 1.0 / (1 + np.exp(-x))
return p
# Plot the logistic function
t = np.arange(-6, 6, 0.1)
z = logistic(t)
#z = np.divide(1.0, 1 + np.exp(-t))
plt.plot(t, z)
plt.xlabel('$t$', fontsize=14)
plt.ylabel('$\phi(t)$', fontsize=14)
plt.title('The logistic function')
plt.grid()
# Weight vector:
w = [1, 4, 8] # Try different weights
# Create a rectangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1)
# Plot the logistic map
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)')
# SOLUTION TO THE EXERCISE
# Weight vector:
w = [1, 10, 10, -20, 5, 1] # Try different weights
# Create a rectangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1 + w[3]*np.multiply(xx0,xx0) +
w[4]*np.multiply(xx0,xx1) + w[3]*np.multiply(xx1,xx1))
# Plot the logistic map
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)')
# Adapted from a notebook by Jason Brownlee
def loadDataset(filename, split):
xTrain = []
cTrain = []
xTest = []
cTest = []
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for i in range(len(dataset)-1):
for y in range(4):
dataset[i][y] = float(dataset[i][y])
item = dataset[i]
if random.random() < split:
xTrain.append(item[0:4])
cTrain.append(item[4])
else:
xTest.append(item[0:4])
cTest.append(item[4])
return xTrain, cTrain, xTest, cTest
with open('iris.data', 'rb') as csvfile:
lines = csv.reader(csvfile)
xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66)
nTrain_all = len(xTrain_all)
nTest_all = len(xTest_all)
print 'Train: ' + str(nTrain_all)
print 'Test: ' + str(nTest_all)
# Select attributes
i = 0 # Try 0,1,2,3
j = 1 # Try 0,1,2,3 with j!=i
# Select two classes
c0 = 'Iris-versicolor'
c1 = 'Iris-virginica'
# Select two coordinates
ind = [i, j]
# Take training test
X_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1])
C_tr = [cTrain_all[n] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1]
Y_tr = np.array([int(c==c1) for c in C_tr])
n_tr = len(X_tr)
# Take test set
X_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1])
C_tst = [cTest_all[n] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1]
Y_tst = np.array([int(c==c1) for c in C_tst])
n_tst = len(X_tst)
def normalize(X, mx=None, sx=None):
# Compute means and standard deviations
if mx is None:
mx = np.mean(X, axis=0)
if sx is None:
sx = np.std(X, axis=0)
# Normalize
X0 = (X-mx)/sx
return X0, mx, sx
# Normalize data
Xn_tr, mx, sx = normalize(X_tr)
Xn_tst, mx, sx = normalize(X_tst, mx, sx)
# Separate components of x into different arrays (just for the plots)
x0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0]
x1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0]
x0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1]
x1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1]
# Scatterplot.
labels = {'Iris-setosa': 'Setosa',
'Iris-versicolor': 'Versicolor',
'Iris-virginica': 'Virginica'}
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
def logregFit(Z_tr, Y_tr, rho, n_it):
# Data dimension
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
# Compute posterior probabilities for weight w
p1_tr = logistic(np.dot(Z_tr, w))
p0_tr = logistic(-np.dot(Z_tr, w))
# Compute negative log-likelihood
nll_tr[n] = - np.dot(Y_tr.T, np.log(p1_tr)) - np.dot((1-Y_tr).T, np.log(p0_tr))
# Update weights
w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)
return w, nll_tr
def logregPredict(Z, w):
# Compute posterior probability of class 1 for weights w.
p = logistic(np.dot(Z, w))
# Class
D = [int(round(pn)) for pn in p]
return p, D
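# Illustrative sanity check (added): with w = (0, 1, 1) and the extended sample
# z = (1, 2, 2), the posterior is logistic(4) ~= 0.982, so the predicted class is 1.
p_demo, D_demo = logregPredict(np.array([[1., 2., 2.]]), np.array([[0.], [1.], [1.]]))
print p_demo, D_demo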
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 200 # Number of iterations
# Compute Z's
Z_tr = np.c_[np.ones(n_tr), Xn_tr]
Z_tst = np.c_[np.ones(n_tst), Xn_tst]
n_dim = Z_tr.shape[1]
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The optimal weights are:"
print w
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Create a rectangular grid.
x_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max()
y_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max()
dx = x_max - x_min
dy = y_max - y_min
h = dy /400
xx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h),
np.arange(y_min - 0.1 * dx, y_max + 0.1 * dy, h))
X_grid = np.array([xx.ravel(), yy.ravel()]).T
# Compute Z's
Z_grid = np.c_[np.ones(X_grid.shape[0]), X_grid]
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
# Put the result into a color plot
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
pp = pp.reshape(xx.shape)
plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
g = 5 # Degree of polynomial
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The optimal weights are:"
print w
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Compute Z_grid
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
pp = pp.reshape(xx.shape)
# Paint output maps
pylab.rcParams['figure.figsize'] = 8, 4 # Set figure size
for i in [1, 2]:
ax = plt.subplot(1,2,i)
ax.plot(x0c0, x1c0,'r.', label=labels[c0])
ax.plot(x0c1, x1c1,'g+', label=labels[c1])
ax.set_xlabel('$x_' + str(ind[0]) + '$')
ax.set_ylabel('$x_' + str(ind[1]) + '$')
ax.axis('equal')
if i==1:
ax.contourf(xx, yy, pp, cmap=plt.cm.copper)
else:
ax.legend(loc='best')
ax.contourf(xx, yy, np.round(pp), cmap=plt.cm.copper)
def logregFit2(Z_tr, Y_tr, rho, n_it, C=1e4):
# Compute Z's
r = 2.0/C
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
p_tr = logistic(np.dot(Z_tr, w))
sk = np.multiply(p_tr, 1-p_tr)
S = np.diag(np.ravel(sk.T))
# Compute negative log-likelihood
nll_tr[n] = - np.dot(Y_tr.T, np.log(p_tr)) - np.dot((1-Y_tr).T, np.log(1-p_tr))
# Update weights
invH = np.linalg.inv(r*np.identity(n_dim) + np.dot(Z_tr.T, np.dot(S, Z_tr)))
w += rho*np.dot(invH, np.dot(Z_tr.T, Y_tr - p_tr))
return w, nll_tr
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
C = 1000
g = 4
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(X_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(X_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit2(Z_tr, Y_tr2, rho, n_it, C)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Create a logistic regression object.
LogReg = linear_model.LogisticRegression(C=1.0)
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Fit model to data.
LogReg.fit(Z_tr, Y_tr)
# Classify training and test data
D_tr = LogReg.predict(Z_tr)
D_tst = LogReg.predict(Z_tst)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
# Compute Z_grid
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)
# Compute the classifier output for all samples in the grid.
dd = LogReg.predict(Z_grid)
pp = LogReg.predict_proba(Z_grid)[:,1]
pp = pp.reshape(xx.shape)
# Paint output maps
pylab.rcParams['figure.figsize'] = 8, 4 # Set figure size
for i in [1, 2]:
ax = plt.subplot(1,2,i)
ax.plot(x0c0, x1c0,'r.', label=labels[c0])
ax.plot(x0c1, x1c1,'g+', label=labels[c1])
ax.set_xlabel('$x_' + str(ind[0]) + '$')
ax.set_ylabel('$x_' + str(ind[1]) + '$')
ax.axis('equal')
if i==1:
ax.contourf(xx, yy, pp, cmap=plt.cm.copper)
else:
ax.legend(loc='best')
ax.contourf(xx, yy, np.round(pp), cmap=plt.cm.copper)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Introduction
Step2: 2.2. Classifiers based on the logistic model.
Step3: 3.3. Nonlinear classifiers.
Step4: 3. Inference
Step5: Now, we select two classes and two attributes.
Step6: 3.2.2. Data normalization
Step7: Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set.
Step8: The following code generates a plot of the normalized training data.
Step9: In order to apply the gradient descent rule, we need to define two methods
Step10: We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\bf z}({\bf x}) = (1, {\bf x}^\intercal)^\intercal$.
Step11: 3.2.3. Free parameters
Step12: 3.2.5. Polynomial Logistic Regression
Step13: Visualizing the posterior map we can se that the polynomial transformations produces nonlinear decision boundaries.
Step14: 4. Regularization and MAP estimation.
Step15: 6. Logistic regression in Scikit Learn.
|
5,459
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=200, verbose=1)
export_dir = 'saved_model/1'
tf.saved_model.save(model, export_dir)
# Convert the model.
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
tflite_model = converter.convert()
tflite_model_file = pathlib.Path('model.tflite')
tflite_model_file.write_bytes(tflite_model)
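# Alternatively (illustrative), the interpreter can load the model from the file
# written above: tf.lite.Interpreter(model_path=str(tflite_model_file))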
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the TensorFlow Lite model on random input data.
input_shape = input_details[0]['shape']
inputs, outputs = [], []
for _ in range(100):
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_results = interpreter.get_tensor(output_details[0]['index'])
# Test the TensorFlow model on random input data.
tf_results = model(tf.constant(input_data))
output_data = np.array(tf_results)
inputs.append(input_data[0][0])
outputs.append(output_data[0][0])
plt.plot(inputs, outputs, 'r')
plt.show()
try:
from google.colab import files
files.download(tflite_model_file)
except:
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Running TFLite models
Step2: Create a basic model of the form y = mx + c
Step3: Generate a SavedModel
Step4: Convert the SavedModel to TFLite
Step5: Initialize the TFLite interpreter to try it out
Step6: Visualize the model
Step7: Download the TFLite model file
|
5,460
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [1e5, 1e6, 1e7]
results = {}
best_val = -1
best_svm = None
pass
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
for learning_rate in learning_rates:
for reg in regularization_strengths:
classifier = LinearSVM()
classifier.train(X_train_feats, y_train, learning_rate=learning_rate, reg=reg, num_iters=2000,
batch_size=200, verbose=False)
y_train_predict = classifier.predict(X_train_feats)
y_val_predict = classifier.predict(X_val_feats)
train_accuracy = np.mean(y_train==y_train_predict)
val_accuracy = np.mean(y_val==y_val_predict)
results[(learning_rate, reg,)] = (train_accuracy, val_accuracy)
if val_accuracy > best_val:
best_val = val_accuracy
best_svm = classifier
pass
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'train accuracy: %f, val accuracy: %f, lr %e reg %e ' % (
train_accuracy, val_accuracy, lr, reg)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print test_accuracy
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
print X_train_feats.shape
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
# grid search
# TODO: try random search for hyperparameters
from itertools import product
best_val_acc = 0
best_net = None
net_results = {}
regs = [1e-3, 5e-3, 1e-2, 1e-1][1:3]
learning_rates = [1e-2, 1e-1, 1][-2:]
num_iters_list = [2000][:]
batch_sizes = [400]
for (
reg,
lr,
num_iters,
batch_size,
) in product(regs,
learning_rates,
num_iters_list,
batch_sizes):
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
learning_rate=lr, reg=reg,
num_iters=num_iters, batch_size=batch_size, verbose=False)
net_results[reg, lr] = (stats['train_acc_history'][-1], stats['val_acc_history'][-1], net)
if stats['val_acc_history'][-1] > best_val_acc:
best_stats = stats
best_net = net
best_val_acc = stats['val_acc_history'][-1]
best_hyperparameters = (hidden_dim, reg, lr, num_iters)
print('Model val_acc: %f, hidden_size: %d, num_iters: %d, lr: %f, reg: %f'
%(stats['val_acc_history'][-1], hidden_dim, num_iters, lr, reg))
print('Best model val_acc: %f, hidden_size: %d, reg: %f, lr: %f, num_iters: %d'
% ((best_val_acc,) + best_hyperparameters))
pass
################################################################################
# END OF YOUR CODE #
################################################################################
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in net_results]
y_scatter = [math.log10(x[1]) for x in net_results]
# plot training accuracy
marker_size = 100
colors = [net_results[x][0] for x in net_results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [net_results[x][1] for x in net_results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(best_stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(best_stats['train_acc_history'], label='train')
plt.plot(best_stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
plt.show()
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (net.predict(X_test_feats) == y_test).mean()
print test_acc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
5,461
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import openaq
import warnings
warnings.simplefilter('ignore')
%matplotlib inline
# Set major seaborn aesthetics
sns.set("notebook", style='ticks', font_scale=1.0)
# Increase the quality of inline plots
mpl.rcParams['figure.dpi']= 500
api = openaq.OpenAQ()
locations = api.locations(city='Delhi', df=True)
locations.location
locations = locations.query("count > 100").query("lastUpdated >= '2017-03-01'")
locations.location
params = []
for i, r in locations.iterrows():
[params.append(x) for x in r.parameters if x not in params]
params
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Choosing Locations
Step2: Let's go ahead and filter our results to only grab locations that have been updated in 2017 and have at least 100 data points.
Step3: Now that we have several up-to-date locations in Delhi we can use, let's see what parameters we have to play with!
|
5,462
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from os.path import join, exists, expandvars
import pandas as pd
from IPython.display import display, Markdown
import seaborn.xkcd_rgb as colors
from tax_credit.plotting_functions import (pointplot_from_data_frame,
boxplot_from_data_frame,
heatmap_from_data_frame,
per_level_kruskal_wallis,
beta_diversity_pcoa,
average_distance_boxplots,
rank_optimized_method_performance_by_dataset)
from tax_credit.eval_framework import (evaluate_results,
method_by_dataset_a1,
parameter_comparisons,
merge_expected_and_observed_tables,
filter_df)
## project_dir should be the directory where you've downloaded (or cloned) the
## tax-credit repository.
project_dir = join('..', '..')
## expected_results_dir contains expected composition data in the structure
## expected_results_dir/<dataset name>/<reference name>/expected/
expected_results_dir = join(project_dir, "data/precomputed-results/", "mock-community")
## mock_results_fp designates the files to which summary results are written.
## If this file exists, it can be read in to generate results plots, instead
## of computing new scores.
mock_results_fp = join(expected_results_dir, 'broad_sweep_results.tsv')
## results_dirs should contain the directory or directories where
## results can be found. By default, this is the same location as expected
## results included with the project. If other results should be included,
## absolute paths to those directories should be added to this list.
results_dirs = [expected_results_dir]
## directory containing mock community data, e.g., feature table without taxonomy
mock_dir = join(project_dir, "data", "mock-community")
## Minimum number of times an OTU must be observed for it to be included in analyses. Edit this
## to analyze the effect of the minimum count on taxonomic results.
min_count = 1
## Define the range of taxonomic levels over which to compute accuracy scores.
## The default given below will compute order (level 2) through species (level 6)
taxonomy_level_range = range(2,7)
dataset_ids = ['mock-' + str(m) for m in (3, 12, 18, 22, 24, '26-ITS1', '26-ITS9')]
mock_results = evaluate_results(results_dirs,
expected_results_dir,
mock_results_fp,
mock_dir,
taxonomy_level_range=range(2,7),
min_count=min_count,
taxa_to_keep=None,
md_key='taxonomy',
subsample=False,
per_seq_precision=True,
exclude=['other'],
method_ids=['nb-extra'],
append=False,
force=False)
mock_results['Reference'].unique()
#mock_results = filter_df(mock_results, column_name='Reference',
# values=['gg_13_8_otus_amplicon', 'gg_13_8_otus_read', 'gg_13_8_otus_full'],
# exclude=False)
mock_results = mock_results.reset_index(drop=True)
color_pallette={
'nb-extra': 'black'
}
y_vars = ["Precision", "Recall", "F-measure", "Taxon Accuracy Rate", "Taxon Detection Rate"]
pointplot_from_data_frame?
pointplot_from_data_frame(mock_results, "Level", y_vars,
"Reference", "Method", color_pallette)
mock_results['Reference'].unique()
from itertools import product
from pandas import DataFrame, concat, to_numeric
from numpy import mean
import rpy2
%load_ext rpy2.ipython
%R require(rpart)
columns = ['Alpha', 'Class-Prior', 'N-Features', 'Ngram-Range', 'Norm', 'Use-IDF', 'Confidence']
params = DataFrame((s.split(':') for s in mock_results['Parameters']), columns=columns)
keepers = ['Dataset', 'Level', 'Reference']
raw_param_results = concat([mock_results[keepers + ['F-measure']], params], axis=1)
raw_param_results = raw_param_results.apply(to_numeric, errors='ignore')
param_results = raw_param_results.groupby(keepers + columns, as_index=False).mean()
len(param_results)
%%R
recommend_params <- function(data, prior, levels, references)
{
data = data[data[,"Reference"] %in% references,]
data = data[data[,"Class.Prior"] == prior,]
data = data[data[,"Level"] %in% levels,]
fit <- rpart(F.measure ~ Confidence + Use.IDF + Ngram.Range + N.Features + Alpha + Reference + Norm,
data=data,
method="anova",
control=rpart.control(cp=0))
rightmost_leaf <- fit$frame[fit$frame[,"yval"] == max(fit$frame[,"yval"]),]
path.rpart(fit, as.numeric(rownames(rightmost_leaf)))
}
priors = ['uniform', 'prior']
reference_sets = [
['gg_13_8_otus_amplicon', 'gg_13_8_otus_full', 'gg_13_8_otus_read'],
['unite_20.11.2016_clean_amplicon', 'unite_20.11.2016_clean_full',
'unite_20.11.2016_clean_read']
]
level_sets = [[2,3,4,5], [6]]
for prior, levels, references in product(priors, level_sets, reference_sets):
display(Markdown("Prior: `" + prior + '`'))
display(Markdown("References: `" + str(references) + '`'))
display(Markdown("Levels: `" + str(levels) + '`'))
%R -i param_results,prior,levels,references recommend_params(param_results, prior, levels, references)
result = per_level_kruskal_wallis(mock_results, y_vars, group_by='Method',
dataset_col='Reference', level_name='Level',
levelrange=range(2,7), alpha=0.05,
pval_correction='fdr_bh')
result
mock_results_6 = mock_results[mock_results['Level'] == 6]
boxplot_from_data_frame?
boxplot_from_data_frame(mock_results_6, group_by="Method", metric="Precision", color_palette=color_pallette)
boxplot_from_data_frame(mock_results_6, group_by="Method", metric="Recall", color_palette=color_pallette)
boxplot_from_data_frame(mock_results_6, group_by="Method", metric="F-measure", color_palette=color_pallette)
boxplot_from_data_frame(mock_results_6, group_by="Method", metric="Taxon Accuracy Rate", color_palette=color_pallette)
boxplot_from_data_frame(mock_results_6, group_by="Method", metric="Taxon Detection Rate", color_palette=color_pallette)
for i in [n for n in range(1,27)]:
display(Markdown('## mock-{0}'.format(i)))
best = method_by_dataset_a1(mock_results_6, 'mock-{0}'.format(i))
display(best)
for method in mock_results_6['Method'].unique():
top_params = parameter_comparisons(
mock_results_6, method,
metrics=['Precision', 'Recall', 'F-measure',
'Taxon Accuracy Rate', 'Taxon Detection Rate'])
display(Markdown('## {0}'.format(method)))
display(top_params[:10])
uniform_6 = mock_results_6[['uniform' in p for p in mock_results_6['Parameters']]]
for method in uniform_6['Method'].unique():
top_params = parameter_comparisons(
uniform_6, method,
metrics=['Precision', 'Recall', 'F-measure',
'Taxon Accuracy Rate', 'Taxon Detection Rate'])
display(Markdown('## {0}'.format(method)))
display(top_params[:10])
rank_optimized_method_performance_by_dataset(mock_results,
dataset="Reference",
metric="F-measure",
level_range=range(4,7),
display_fields=["Method",
"Parameters",
"Taxon Accuracy Rate",
"Taxon Detection Rate",
"Precision",
"Recall",
"F-measure"],
paired=True,
parametric=True,
color=None,
color_palette=color_pallette)
rank_optimized_method_performance_by_dataset(mock_results,
dataset="Reference",
metric="Taxon Accuracy Rate",
level_range=range(6,7),
display_fields=["Method",
"Parameters",
"Taxon Accuracy Rate",
"Taxon Detection Rate",
"Precision",
"Recall",
"F-measure"],
paired=True,
parametric=True,
color=None,
color_palette=color_pallette)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure local environment-specific values
Step2: Find mock community pre-computed tables, expected tables, and "query" tables
Step3: Restrict analyses to a set of datasets or references
Step4: Compute and summarize precision, recall, and F-measure for mock communities
Step5: CART Analysis
Step6: Split the Parameter String and Aggregate by Community
Step7: Kruskal-Wallis between-method accuracy comparisons
Step8: Violin plots of per-level accuracy
Step9: Method Optimization
Step10: Now we can determine which parameter configuration performed best for each method. Count best values in each column indicate how many samples a given method achieved within one mean absolute deviation of the best result (which is why they may sum to more than the total number of samples).
Step11: Optimized method performance
|
5,463
|
<ASSISTANT_TASK:>
Python Code:
!hostname
%load_ext autoreload
%autoreload 2
%matplotlib inline
import ipyrad
import ipyrad.analysis as ipa
import ipyparallel as ipp
from ipyrad.analysis.popgen import Popgen
from ipyrad import Assembly
from ipyrad.analysis.locus_extracter import LocusExtracter
ipyclient = ipp.Client(cluster_id="popgen")
print(len(ipyclient))
# popgen tools can accept either an ipyrad assembly
data = ipyrad.load_json("/tmp/ipyrad-test/watdo.json")
# or alternatively the path to your VCF or HDF5 formatted snps file
#data = "/tmp/ipyrad-test/watdo_outfiles/watdo.snps.hdf5"
imap = {
"pop1" : ["1A_0", "1B_0", "1C_0", "1D_0"],
"pop2" : ["2E_0", "2F_0", "2G_0", "2H_0"],
"pop3" : ["3I_0", "3J_0", "3K_0", "3L_0"],
}
popgen = Popgen(data=data, imap=imap)
popgen.samples
popgen.params
from IPython.display import display
popgen.run(ipyclient=ipyclient)
popgen.results
display(popgen.results)
from ipyrad.analysis.locus_extracter import LocusExtracter
import ipyparallel as ipp
ipyclient = ipp.Client(cluster_id="popgen")
print(len(ipyclient))
lex = LocusExtracter(
data=data.seqs_database,
imap=imap,
mincov=len(imap), # ENFORCE at least 1 per spp.
)
lex.run(ipyclient=ipyclient)
print(len(popgen.lex.loci))
popgen.lex.get_locus(1, as_df=True)
import h5py
import pandas as pd
wat = pd.DataFrame()
with h5py.File(data.snps_database, 'r') as io5:
diffs = io5["snps"][0] != io5["snps"][1]
for idx, name in enumerate(io5["snps"].attrs["names"]):
wat[name.decode("utf-8")] = io5["snps"][idx]
wat["1A_0"]
import h5py
with h5py.File(data.seqs_database, 'r') as io5:
print(io5.keys())
print(io5["phymap"].attrs.keys())
print(io5["phymap"].attrs["phynames"])
print(io5["phy"][0])
from collections import Counter
from itertools import combinations
import numpy as np
# Make a processor and give it some data
loci = [lex.get_locus(x, as_df=True) for x in range(2)]
proc = ipa.popgen.Processor(popgen.params, 0, loci)
proc.run()
#proc._pi()
locus = loci[0]
#locus[10][:5] = 82
#display(locus)
#%timeit proc.pi(locus)
print(proc.pi(locus))
print(proc.Watterson(locus))
print(proc.TajimasD(locus))
print(proc.results)
from ipyrad.assemble.utils import DCONS
import pandas as pd
import itertools
p1 = popgen.imap["pop1"]
#locus.loc[p1, :].apply(lambda x: [DCONS[y] for y in x])
cts = np.array(locus.apply(lambda bases:\
Counter(x for x in bases if x not in [45, 78])))
snps = np.array([len(x) for x in cts]) > 1
cts = cts[snps]
def dcons(counter):
new = list(itertools.chain(*[DCONS[x]*ct for x, ct in counter.items()]))
return Counter(new)
print(cts)
%timeit list(map(dcons, cts))
ipyrad.analysis.popgen._calc_sumstats(popgen, 10, loci)
import pickle
!ls analysis-popgen/
with open("analysis-popgen/0.p", 'rb') as inp:
dat = pickle.load(inp)
dat
proc._process_locus_pops(locus, ["pop1", "pop3"])
pop_cts, sidxs = proc._process_locus_pops(locus, ["pop1", "pop2"])
# Between population summary statistics
def _dxy(cts_a, cts_b):
Dxy = 0
ncomps = 0
for cta, ctb in zip(cts_a, cts_b):
ncomps += sum(list(cta.values())) *\
sum(list(ctb.values()))
for ka, va in cta.items():
for kb, vb in ctb.items():
if ka == kb: continue
Dxy += va*vb
print(Dxy, ncomps)
return Dxy/ncomps
Dxy = _dxy(pop_cts["pop1"], pop_cts["pop2"])
Dxy/len(locus)
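# Added sanity check for _dxy with two hand-made single-site populations:
# pop A alleles {A: 2}, pop B alleles {A: 1, T: 1} -> 2 differing pairs of 4 total.
print(_dxy([Counter({'A': 2})], [Counter({'A': 1, 'T': 1})])) # expect 0.5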
%timeit proc.Dxy(locus, ["pop1", "pop2"])
proc._fst_full(locus)
print(np.zeros((len(proc.data.imap), len(proc.data.imap))))
Dxy_arr = pd.DataFrame(
    data=np.zeros((len(proc.data.imap), len(proc.data.imap))),
index=proc.data.imap.keys(),
columns=proc.data.imap.keys(),
)
loci = [lex.get_locus(x, as_df=True) for x in range(100)]
proc = ipa.popgen.Processor(popgen.params, 0, loci)
proc.run()
import glob
import os
pickles = glob.glob(os.path.join(popgen.workdir, "*.p"))
sorted(pickles, key=lambda x: int(x.rsplit("/", 1)[-1][:-2]))
#pickles[0].rsplit("/", 1)[-1][:-2]
pdicts = {}
for pkl in pickles:
with open(pkl, 'rb') as inp:
pdicts[int(pkl.rsplit("/", 1)[-1][:-2])] = pickle.load(inp)
pdicts[0]["pi"]
#print(pdicts[0]["pi"])
pdicts[0]["Fst"].keys()
full_res = {}
for d in [pdicts]: full_res.update(d)
full_res.keys()
pidx = sorted(full_res.keys())
pi_dict = {}
w_theta_dict = {}
tajd_dict = {}
for idx in pidx:
pi_dict.update(full_res[idx]["pi"])
w_theta_dict.update(full_res[idx]["Watterson"])
tajd_dict.update(full_res[idx]["TajimasD"])
popstats = {}
for pop in proc.imap:
popstats[pop] = pd.DataFrame([], columns=["pi",
"raw_pi",
"Watterson",
"raw_Watterson",
"TajimasD"], index=range(len(popgen.lex.loci)))
for lidx in range(len(popgen.lex.loci)):
popstats[pop]["pi"].loc[lidx] = pi_dict[lidx][pop]["pi_per_base"]
popstats[pop]["raw_pi"].loc[lidx] = pi_dict[lidx][pop]["pi"]
popstats[pop]["Watterson"].loc[lidx] = w_theta_dict[lidx][pop]["w_theta_per_base"]
popstats[pop]["raw_Watterson"].loc[lidx] = w_theta_dict[lidx][pop]["w_theta"]
popstats[pop]["TajimasD"].loc[lidx] = tajd_dict[lidx][pop]
lidx = sorted(full_res.keys())
for idx in lidx[:1]:
for pop in proc.imap:
for bidx in full_res[idx]["pi"]:
print(full_res[idx]["pi"][bidx][pop]["pi_per_base"])
# pi_per_base = np.mean(full_res[idx]["pi"][idx][pop]["pi_per_base"])
# print(pop, pi_per_base)
pi_dict[0]
#popstats["pop1"].mean()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv("/tmp/gorg-tropics_sags_tableS2 - SAGs.csv")
print(df.columns)
set(df["Lineage"])
#df[["Genome completeness (%)", "Lineage"]]
df[["Raw read count", "Lineage"]]
plt.hist(df[df["Lineage"] == "AEGEAN-169"]["Genome completeness (%)"])
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
fig, ax = plt.subplots(figsize=(10, 10))
for l in set(df["Lineage"]):
#print(l, np.mean(df[df["Lineage"] == l]["Genome completeness (%)"]))
#print(l, np.mean([locale.atoi(x) for x in df[df["Lineage"] == l]["Raw read count"]]))
#print(l, np.std([locale.atoi(x) for x in df[df["Lineage"] == l]["Assembly size (bp)"]]))
    lmask = df[np.array(df["Lineage"] == l) & np.array(df["Genome completeness (%)"] > 80)]
# cmask = df[df["Genome completeness (%)" > 80]]
try:
alpha=0.05
if l == "AEGEAN-169": alpha=1
plt.hist(lmask["Genome completeness (%)"], alpha=alpha, label=l, bins=40)
except: pass
plt.xlim(80, 100)
plt.legend()
nsamps=10
nspecies=3
dfs = []
for idx in range(nspecies):
df = pd.DataFrame([
[idx] * nsamps,
range(nsamps),
np.random.normal(0, 10, nsamps),
np.random.normal(0, 1, nsamps),
np.random.randint(0, 100, nsamps),
np.random.choice(["small", "medium", "large"], nsamps),
],
index=["Species_ID", "Sample_ID", "Trait1", "Trait2", "Trait3", "Trait4"]).T
dfs.append(df)
df = pd.concat(dfs)
df.to_csv("/tmp/watdo.csv", index=False)
!cat /tmp/watdo.csv
pd.set_option('display.max_rows', 999)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
inf = "/home/isaac/Continuosity/NEON/NEON_seq-metabarcode-zooplankton/NEON.D03.BARC.DP1.20221.001.2019-07.expanded.20210123T023002Z.RELEASE-2021/NEON.D03.BARC.DP1.20221.001.zoo_metabarcodeTaxonomy.2019-07.expanded.20201218T153238Z.csv"
df = pd.read_csv(inf)
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Development of the Processor class to calculate all the stats
Step2: Prototyping the dcons function to split alleles per base
Step3: Loading pickled results
Step4: Prototype Dxy
Step5: Prototype Fst
Step6: Prototyping collating stats across runs
|
5,464
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # Matrix and vector computation package
np.seterr(all='ignore') # ignore numpy warning like multiplication of inf
import matplotlib.pyplot as plt # Plotting library
from matplotlib.colors import colorConverter, ListedColormap # some plotting functions
from matplotlib import cm # Colormaps
# Allow matplotlib to plot inside this notebook
%matplotlib inline
# Set the seed of the numpy random number generator so that the tutorial is reproducable
np.random.seed(seed=1)
# Define and generate the samples
nb_of_samples_per_class = 20 # The number of sample in each class
red_mean = [-1,0] # The mean of the red class
blue_mean = [1,0] # The mean of the blue class
std_dev = 1.2 # standard deviation of both classes
# Generate samples from both classes
x_red = np.random.randn(nb_of_samples_per_class, 2) * std_dev + red_mean
x_blue = np.random.randn(nb_of_samples_per_class, 2) * std_dev + blue_mean
# Merge samples in set of input variables x, and corresponding set of output variables t
X = np.vstack((x_red, x_blue)) # 40x2
t = np.vstack((np.zeros((nb_of_samples_per_class,1)), np.ones((nb_of_samples_per_class,1)))) # 40x1
# Plot both classes on the x1, x2 plane
plt.plot(x_red[:,0], x_red[:,1], 'ro', label='class red')
plt.plot(x_blue[:,0], x_blue[:,1], 'bo', label='class blue')
plt.grid()
plt.legend(loc=2)
plt.xlabel('$x_1$', fontsize=15)
plt.ylabel('$x_2$', fontsize=15)
plt.axis([-4, 4, -4, 4])
plt.title('red vs. blue classes in the input space')
plt.show()
# Define the logistic function
def logistic(z):
return 1 / (1 + np.exp(-z))
# Define the neural network function y = 1 / (1 + numpy.exp(-x*w))
# x: 40x2 and w: 1x2, so use w.T here
def nn(x, w):
    return logistic(x.dot(w.T)) # 40x1 -> this is y
# Define the neural network prediction function that only returns
# 1 or 0 depending on the predicted class
def nn_predict(x,w):
return np.around(nn(x,w))
# Define the cost function
def cost(y, t):
    return - np.sum(np.multiply(t, np.log(y)) + np.multiply((1-t), np.log(1-y))) # y and t are both 40x1
# Plot the cost in function of the weights
# Define a vector of weights for which we want to plot the cost
nb_of_ws = 100 # compute the cost nb_of_ws times in each dimension
ws1 = np.linspace(-5, 5, num=nb_of_ws) # weight 1
ws2 = np.linspace(-5, 5, num=nb_of_ws) # weight 2
ws_x, ws_y = np.meshgrid(ws1, ws2) # generate grid
cost_ws = np.zeros((nb_of_ws, nb_of_ws)) # initialize cost matrix
# Fill the cost matrix for each combination of weights
for i in range(nb_of_ws):
for j in range(nb_of_ws):
cost_ws[i,j] = cost(nn(X, np.asmatrix([ws_x[i,j], ws_y[i,j]])) , t)
# Plot the cost function surface
plt.contourf(ws_x, ws_y, cost_ws, 20, cmap=cm.pink)
cbar = plt.colorbar()
cbar.ax.set_ylabel('$\\xi$', fontsize=15)
plt.xlabel('$w_1$', fontsize=15)
plt.ylabel('$w_2$', fontsize=15)
plt.title('Cost function surface')
plt.grid()
plt.show()
# define the gradient function.
def gradient(w, x, t):
return (nn(x, w) - t).T * x
# define the update function delta w which returns the
# delta w for each weight in a vector
def delta_w(w_k, x, t, learning_rate):
return learning_rate * gradient(w_k, x, t)
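# Added sketch: numerically verify the analytic gradient above with a central
# finite-difference estimate of the cost (the test point w_check is arbitrary).
w_check = np.asmatrix([1.0, -0.5])
eps = 1e-5
numerical_grad = np.zeros((1, 2))
for k in range(2):
    w_plus = np.asmatrix(np.copy(w_check))
    w_minus = np.asmatrix(np.copy(w_check))
    w_plus[0, k] += eps
    w_minus[0, k] -= eps
    numerical_grad[0, k] = (cost(nn(X, w_plus), t) - cost(nn(X, w_minus), t)) / (2 * eps)
print('analytic gradient: ', gradient(w_check, X, t))
print('numerical gradient:', numerical_grad)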
# Set the initial weight parameter
w = np.asmatrix([-4, -2])
# Set the learning rate
learning_rate = 0.05
# Start the gradient descent updates and plot the iterations
nb_of_iterations = 10 # Number of gradient descent updates
w_iter = [w] # List to store the weight values over the iterations
for i in range(nb_of_iterations):
dw = delta_w(w, X, t, learning_rate) # Get the delta w update
w = w-dw # Update the weights
w_iter.append(w) # Store the weights for plotting
# Plot the first weight updates on the error surface
# Plot the error surface
plt.contourf(ws_x, ws_y, cost_ws, 20, alpha=0.9, cmap=cm.pink)
cbar = plt.colorbar()
cbar.ax.set_ylabel('cost')
# Plot the updates
for i in range(1, 4):
w1 = w_iter[i-1]
w2 = w_iter[i]
# Plot the weight-cost value and the line that represents the update
plt.plot(w1[0,0], w1[0,1], 'bo') # Plot the weight cost value
plt.plot([w1[0,0], w2[0,0]], [w1[0,1], w2[0,1]], 'b-')
plt.text(w1[0,0]-0.2, w1[0,1]+0.4, '$w({})$'.format(i), color='b')
w1 = w_iter[3]
# Plot the last weight
plt.plot(w1[0,0], w1[0,1], 'bo')
plt.text(w1[0,0]-0.2, w1[0,1]+0.4, '$w({})$'.format(4), color='b')
# Show figure
plt.xlabel('$w_1$', fontsize=15)
plt.ylabel('$w_2$', fontsize=15)
plt.title('Gradient descent updates on cost surface')
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loss function, the chain rule and its derivative
Step2: Plot the cost function; as you can see it is convex and has a single global minimum.
Step3: The gradient and delta_w functions are just the simple equations derived above.
Step4: Start training, iterating for 10 steps. The key line is w = w - dw, which updates the weights at each iteration.
Step5: Plot the first 4 iterations; you can see the weights move toward the global minimum.
|
5,465
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (13,8)
df = pd.read_csv("./winequality-red.csv")
df.head()
df.shape
#df.loc[df.b > 0, 'd'] = 1
df.loc[df.quality > 5, 'category'] = 1
df.loc[df.quality <= 5, 'category'] = 0
df.category.value_counts()
df.head()
df.corr()
from pandas.tools.plotting import scatter_matrix
scatter_matrix(df, figsize=(15,15), diagonal='kde')
df.plot(x="alcohol", y="category", kind="scatter")
#df.plot(x="alcohol", y="volatile acidity", kind="scatter", c="category")
ax = df[df.category == 1].plot(x="alcohol", y="volatile acidity", kind="scatter", color="red", label="HIGH", s=100, alpha=0.5)
df[df.category == 0].plot(x="alcohol", y="volatile acidity", kind="scatter", color="green", label="LOW", s=100, alpha=0.5, ax=ax)
pd.set_option("precision",3)
df.shape
df_train = df.iloc[:1280,]
df_test = df.iloc[1280:,]
X_train = df_train["volatile acidity"]
y_train = df_train["category"]
X_test = df_test["volatile acidity"]
y_test = df_test["category"]
X_train = X_train.values.reshape(-1, 1)
X_test = X_test.values.reshape(-1, 1)
from sklearn.linear_model import LogisticRegression
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
sns.lmplot(data=df, x="alcohol", y="category", logistic=True)
predicted = logistic_model.predict(X_test)
df_compare = pd.DataFrame()
df_compare["actual"] = y_test
df_compare["predicted"] = predicted
df_compare["volatile acidity"] = df_test["volatile acidity"]
ax=df_compare.plot(x="volatile acidity", y="actual", kind="scatter", color="blue", label="actual")
df_compare.plot(x="volatile acidity", y="predicted", kind="scatter", color="red", label="predicted", ax=ax)
df_train = df.iloc[:1280,]
df_test = df.iloc[1280:,]
X_train = df_train[["sulphates", "alcohol"]]
y_train = df_train["category"]
X_test = df_test[["sulphates", "alcohol"]]
y_test = df_test["category"]
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
predicted = logistic_model.predict(X_test)
df_compare = pd.DataFrame()
df_compare["actual"] = y_test
df_compare["predicted"] = predicted
df_compare["sulphates"] = df_test["sulphates"]
df_compare["alcohol"] = df_test["alcohol"]
df_compare.head()
ax = df_compare[df_compare.actual == 1].plot(x="alcohol", y="sulphates", kind="scatter", color="red", label="HIGH", s=100, alpha=0.5)
df_compare[df_compare.actual == 0].plot(x="alcohol", y="sulphates", kind="scatter", color="green", label="LOW", s=100, alpha=0.5, ax=ax)
ax = df_compare[df_compare.predicted == 1].plot(x="alcohol", y="sulphates", kind="scatter", color="red", label="HIGH", s=100, alpha=0.5)
df_compare[df_compare.predicted == 0].plot(x="alcohol", y="sulphates", kind="scatter", color="green", label="LOW", s=100, alpha=0.5, ax=ax)
from sklearn import metrics
#ols_auc = metrics.roc_auc_score(df_compare.actual, df_compare.predicted)
fpr, tpr, thresholds = metrics.roc_curve(df_compare.actual, df_compare.predicted)
plt.plot(fpr, tpr)
plt.plot([0,1],[0,1])
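# Added sketch: complement the ROC curve with standard summary metrics for the
# hard 0/1 predictions computed above.
print('accuracy:', metrics.accuracy_score(y_test, predicted))
print('AUC:', metrics.roc_auc_score(y_test, predicted))
print(metrics.classification_report(y_test, predicted))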
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Wine Category
Step2: This is the frequency count for each category
Step3: Visual Exploration
Step4: Alcohol vs Category
Step5: Exercise
Step6: Time to build a predictive model
Step7: It’s a bird… it’s a plane… it… depends on your classifier’s threshold
Step8: Let's add more features - volatile acidity, sulphates, alcohol to predict the category
Step9: Accuracy Metrics
|
5,466
|
<ASSISTANT_TASK:>
Python Code:
test_sentences = [
"the men saw a car .",
"the woman gave the man a book .",
"she gave a book to the man .",
"yesterday , all my trouble seemed so far away ."
]
import nltk
from nltk.corpus import treebank
from nltk.grammar import ProbabilisticProduction, PCFG
# Production count: the number of times a given production occurs
pcount = {}
# LHS-count: counts the number of times a given lhs occurs
lcount = {}
for tree in treebank.parsed_sents():
for prod in tree.productions():
pcount[prod] = pcount.get(prod, 0) + 1
lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1
productions = [
ProbabilisticProduction(
p.lhs(), p.rhs(),
prob=pcount[p] / lcount[p.lhs()]
)
for p in pcount
]
start = nltk.Nonterminal('S')
grammar = PCFG(start, productions)
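# Added sketch: inspect the induced grammar by listing the most probable
# expansions of the start symbol S.
s_expansions = sorted(grammar.productions(lhs=start), key=lambda p: p.prob(), reverse=True)
for p in s_expansions[:5]:
    print(p)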
parser = nltk.ViterbiParser(grammar)
from IPython.display import display
for sent in test_sentences:
for res in parser.parse(sent.split()):
display(res)
from nltk.parse.stanford import StanfordDependencyParser
PATH_TO_CORE = r"C:\Users\Martin\CoreNLP\stanford-corenlp-full-2017-06-09"
jar = PATH_TO_CORE + r"\stanford-corenlp-3.8.0.jar"
model = PATH_TO_CORE + r"\stanford-corenlp-3.8.0-models.jar"
dep_parser = StanfordDependencyParser(
jar, model,
model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
)
from collections import defaultdict
def generate_predicates_for_sentence(sentence):
verbs = set()
sbj = {}
obj = {}
sbj_candidates = defaultdict(list)
case = {}
for result in dep_parser.raw_parse(sentence):
relcl_trips = []
for triple in result.triples():
if triple[1] == "nsubj":
verbs.add(triple[0][0])
sbj[triple[0][0]] = triple[2]
if triple[1] == "dobj":
verbs.add(triple[0][0])
obj[triple[0][0]] = triple[2]
if triple[1] == "nsubjpass":
verbs.add(triple[0][0])
obj[triple[0][0]] = triple[2]
if triple[0][1].startswith("V"):
verbs.add(triple[0][0])
if triple[1] == "nmod":
sbj_candidates[triple[0][0]].append(triple[2])
if triple[1] == "acl:relcl":
verbs.add(triple[2][0])
relcl_trips.append(triple)
if triple[1] == "case":
case[triple[0]] = triple[2]
for triple in relcl_trips:
if triple[2][0] in sbj:
if sbj[triple[2][0]][1] in ["WP", "WDT"]:
sbj[triple[2][0]] = triple[0]
else:
obj[triple[2][0]] = triple[0]
else:
sbj[triple[2][0]] = triple[0]
for v in sbj_candidates:
if v not in sbj:
for cand in sbj_candidates[v]:
if case[cand][0] == "by":
sbj[v] = cand
preds = []
for v in verbs:
preds.append(
v + "(" + sbj.get(v, ("None",))[0] +
", " + obj.get(v, ("None",))[0] + ")"
)
return preds
for pred in generate_predicates_for_sentence(
"The man that saw the raven laughed out loud."
):
print(pred)
def generate_predicates_for_text(text):
predicates = []
for sent in nltk.tokenize.sent_tokenize(text):
predicates.extend(generate_predicates_for_sentence(sent))
return predicates
text = """
I shot an elephant in my pajamas.
The elephant was seen by a giraffe.
The bird I need is a raven.
The man who saw the raven laughed out loud.
"""
for pred in generate_predicates_for_text(text):
print(pred)
def parent_annotation(tree, parentHistory=0, parentChar="^"):
def pa_rec(node, parents):
originalNode = node.label()
parentString = parentChar + '<' + "-".join(parents) + '>'
node.set_label(node.label() + parentString)
for child in node:
pa_rec(child, [originalNode] + parents[:parentHistory])
return node
return pa_rec(tree, [])
test_tree = nltk.Tree(
"S",
[
nltk.Tree("NP", [
nltk.Tree("DET", []),
nltk.Tree("N", [])
]),
nltk.Tree("VP", [
nltk.Tree("V", []),
nltk.Tree("NP", [
nltk.Tree("DET", []),
nltk.Tree("N", [])
])
])
]
)
parent_annotation(
test_tree
)
def generate_predicates_for_sentence(sentence):
# verbs contains everything that should be treated as verb (i.e. become a predicate)
verbs = set()
# sbj (obj) maps from verbs/predicates to its first (second) argument
sbj = {}
obj = {}
# sbj_candidates maps from a verb to potential subjects
# it is used when the verb-noun relation is not clear (e.g. nmod)
sbj_candidates = defaultdict(list)
# in case, we store information about the kind of PP some noun is found in
case = {}
# the negated-set should contain everything we find under negation
negated = set()
# the conj-dict should connect verbs that are coordinated
# the mapping goes from later verbs to former ones
conj = {}
for result in dep_parser.raw_parse(sentence):
relcl_trips = []
for triple in result.triples():
if triple[1] == "nsubj":
verbs.add(triple[0][0])
sbj[triple[0][0]] = triple[2]
if triple[1] == "dobj":
verbs.add(triple[0][0])
obj[triple[0][0]] = triple[2]
if triple[1] == "nsubjpass":
verbs.add(triple[0][0])
obj[triple[0][0]] = triple[2]
if triple[0][1].startswith("V"):
verbs.add(triple[0][0])
if triple[1] == "nmod":
sbj_candidates[triple[0][0]].append(triple[2])
if triple[1] == "acl:relcl":
verbs.add(triple[2][0])
relcl_trips.append(triple)
if triple[1] == "case":
case[triple[0]] = triple[2]
if triple[1] == "neg":
negated.add(triple[0][0])
if triple[1] == "conj":
verbs.add(triple[0][0])
verbs.add(triple[2][0])
conj[triple[2][0]] = triple[0][0]
for triple in relcl_trips:
if triple[2][0] in sbj:
if sbj[triple[2][0]][1] in ["WP", "WDT"]:
sbj[triple[2][0]] = triple[0]
else:
obj[triple[2][0]] = triple[0]
else:
sbj[triple[2][0]] = triple[0]
for v in sbj_candidates:
if v not in sbj:
for cand in sbj_candidates[v]:
if case[cand][0] == "by":
sbj[v] = cand
for v in verbs:
# if we do not have the subject of a verb which is coordinated with some other verb,
# they probably share their subject
if v not in sbj and v in conj and conj[v] in sbj:
sbj[v] = sbj[conj[v]]
preds = []
negator = lambda v: "not_" + v if v in negated else v
for v in verbs:
preds.append(
negator(v) + "(" + sbj.get(v, ("None",))[0] +
", " + obj.get(v, ("None",))[0] + ")"
)
return preds
for pred in generate_predicates_for_sentence(
"Peter saw the elephant, drank wine and laughed."
):
print(pred)
text = """
I see an elephant.
You didn't see the elephant.
Peter saw the elephant and drank wine.
"""
for pred in generate_predicates_for_text(text):
print(pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Exercise 2: Information extraction via syntax analysis
Step3: Homework
Step5: Exercise 4: More semantics for IE
|
5,467
|
<ASSISTANT_TASK:>
Python Code:
from itertools import combinations
import skrf as rf
%matplotlib inline
from pylab import *
rf.stylely()
wg = rf.wr10
wg.frequency.npoints = 101
dut = wg.random(n_ports = 4,name= 'dut')
dut
loads = [wg.load(.1+.1j),
wg.load(.2-.2j),
wg.load(.3+.3j),
wg.load(.5),
]
# construct the impedance array, of shape FXN
z_loads = array([k.z.flatten() for k in loads]).T
ports = arange(dut.nports)
port_combos = list(combinations(ports, 2))
port_combos
composite = wg.match(nports = 4) # composite network, to be filled.
measured,measured_renorm = {},{} # measured subnetworks and renormalized sub-networks
# ports `a` and `b` are the ports we will connect the VNA too
for a,b in port_combos:
# port `c` and `d` are the ports which we will connect the loads too
c,d =ports[(ports!=a)& (ports!=b)]
# determine where `d` will be on four_port, after its reduced to a three_port
e = where(ports[ports!=c]==d)[0][0]
# connect loads
three_port = rf.connect(dut,c, loads[c],0)
two_port = rf.connect(three_port,e, loads[d],0)
# save raw and renormalized 2-port subnetworks
measured['%i%i'%(a,b)] = two_port.copy()
two_port.renormalize(c_[z_loads[:,a],z_loads[:,b]])
measured_renorm['%i%i'%(a,b)] = two_port.copy()
# stuff this 2-port into the composite 4-port
for i,m in enumerate([a,b]):
for j,n in enumerate([a,b]):
composite.s[:,m,n] = two_port.s[:,i,j]
# properly copy the port impedances
composite.z0[:,a] = two_port.z0[:,0]
composite.z0[:,b] = two_port.z0[:,1]
# finally renormalize from
composite.renormalize(50)
measured_renorm
s11_set = rf.NS([measured[k] for k in measured if k[0]=='0'])
figure(figsize = (8,4))
subplot(121)
s11_set .plot_s_db(0,0)
subplot(122)
s11_set .plot_s_deg(0,0)
tight_layout()
s11_set = rf.NS([measured_renorm[k] for k in measured_renorm if k[0]=='0'])
figure(figsize = (8,4))
subplot(121)
s11_set .plot_s_db(0,0)
subplot(122)
s11_set .plot_s_deg(0,0)
tight_layout()
composite == dut
sum((composite - dut).s_mag)
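# Added check: worst-case deviation between the reconstructed composite network
# and the true DUT, taken over all frequencies and S-parameters (linear magnitude).
print((composite - dut).s_mag.max())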
def tippits(dut, gamma, noise=None):
'''
simulate tippits technique on a 4-port dut.
'''
ports = arange(dut.nports)
port_combos = list(combinations(ports, 2))
loads = [wg.load(gamma) for k in ports]
# construct the impedance array, of shape FXN
z_loads = array([k.z.flatten() for k in loads]).T
composite = wg.match(nports = dut.nports) # composite network, to be filled.
#measured,measured_renorm = {},{} # measured subnetworks and renormalized sub-networks
# ports `a` and `b` are the ports we will connect the VNA too
for a,b in port_combos:
# port `c` and `d` are the ports which we will connect the loads too
c,d =ports[(ports!=a)& (ports!=b)]
# determine where `d` will be on four_port, after its reduced to a three_port
e = where(ports[ports!=c]==d)[0][0]
# connect loads
three_port = rf.connect(dut,c, loads[c],0)
two_port = rf.connect(three_port,e, loads[d],0)
if noise is not None:
two_port.add_noise_polar(*noise)
# save raw and renormalized 2-port subnetworks
measured['%i%i'%(a,b)] = two_port.copy()
two_port.renormalize(c_[z_loads[:,a],z_loads[:,b]])
measured_renorm['%i%i'%(a,b)] = two_port.copy()
# stuff this 2-port into the composite 4-port
for i,m in enumerate([a,b]):
for j,n in enumerate([a,b]):
composite.s[:,m,n] = two_port.s[:,i,j]
# properly copy the port impedances
composite.z0[:,a] = two_port.z0[:,0]
composite.z0[:,b] = two_port.z0[:,1]
# finally renormalize from
composite.renormalize(50)
return composite
wg.frequency.npoints = 11
dut = wg.random(4)
#er = lambda gamma: mean((tippits(dut,gamma)-dut).s_mag)/mean(dut.s_mag)
def er(gamma, *args):
return max(abs(tippits(dut, rf.db_2_mag(gamma),*args).s_db-dut.s_db).flatten())
gammas = linspace(-80,0,11)
title('Error vs $|\Gamma|$')
plot(gammas, [er(k) for k in gammas])
plot(gammas, [er(k) for k in gammas])
semilogy()
xlabel('$|\Gamma|$ of Loads (dB)')
ylabel('Max Error in DUT\'s dB(S)')
figure()
#er = lambda gamma: max(abs(tippits(dut,gamma,(1e-5,.1)).s_db-dut.s_db).flatten())
noise = (1e-5,.1)
title('Error vs $|\Gamma|$ with reasonable noise')
plot(gammas, [er(k, noise) for k in gammas])
plot(gammas, [er(k,noise) for k in gammas])
semilogy()
xlabel('$|\Gamma|$ of Loads (dB)')
ylabel('Max Error in DUT\'s dB(S)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we create a Media object, which is used to generate networks for testing. We will use WR-10 Rectangular waveguide.
Step2: Next, let's generate a random 4-port network which will be the DUT that we are trying to measure with our 2-port network analyzer.
Step3: Now, we need to define the loads used to terminate each port when it is not being measured. Note that, as described in [1], no more than one load can have full reflection, $|\Gamma| = 1$
Step4: Create required measurement port combinations. There are 6 different measurements required to measure a 4-port with a 2-port VNA. In general, #measurements = $n\choose 2$, for n-port DUT on a 2-port VNA.
Step5: Now to do it: we loop over the port combos and connect the loads to the right places, simulating actual measurements. Each raw subnetwork measurement is saved, along with the renormalized subnetwork. Finally, we stuff the result into the 4-port composite network.
Step6: Results
Step7: Plotting all three raw measurements of $S_{11}$, we can see that they are not in agreement. These plots correspond to plots 5 and 7 of [1]
Step8: However, the renormalized measurements agree perfectly. These plots correspond to plots 6 and 8 of [1]
Step9: Test For Accuracy
Step10: Nice! How close?
Step11: Dang!
|
5,468
|
<ASSISTANT_TASK:>
Python Code:
from msmbuilder.example_datasets import QuadWell
from msmbuilder.msm import MarkovStateModel
from msmbuilder.lumping import MVCA
import numpy as np
import scipy.cluster.hierarchy
import matplotlib.pyplot as plt
% matplotlib inline
q = QuadWell(random_state=998).get()
ds = q['trajectories']
def regular_spatial_clustering(ds, n_bins=20, halfwidth=np.pi):
new_ds = []
for t in ds:
new_t = []
for i, f in enumerate(t):
width = 2*halfwidth
temp = f + halfwidth
reg = np.floor(n_bins*temp/width)
new_t.append(int(reg))
new_ds.append(np.array(new_t))
return new_ds
halfwidth = max(np.abs([max(np.abs(f)) for f in ds]))[0]
assignments = regular_spatial_clustering(ds, halfwidth=halfwidth)
msm_mdl = MarkovStateModel()
msm_mdl.fit(assignments)
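# Added sketch: quick look at the fitted microstate MSM; `n_states_` and
# `timescales_` are assumed attributes of msmbuilder's MarkovStateModel here.
print('number of microstates:', msm_mdl.n_states_)
print('three slowest implied timescales:', msm_mdl.timescales_[:3])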
def get_centers(n_bins=20, halfwidth=np.pi):
centers = []
tot = 2*halfwidth
interval = tot/n_bins
for i in range(n_bins):
c = (i+1)*interval - interval/2. - halfwidth
centers.append(c)
return(centers)
ccs = get_centers(halfwidth=halfwidth)
nrgs = [-0.6*np.log(p) for p in msm_mdl.populations_]
m,s,b = plt.stem(ccs, nrgs, 'deepskyblue', bottom=-1)
for i in s:
i.set_linewidth(8)
potential = lambda x: 4 * (x ** 8 + 0.8 * np.exp(-80 * x ** 2) + 0.2 * np.exp(
-80 * (x - 0.5) ** 2) +
0.5 * np.exp(-40 * (x + 0.5) ** 2))
exes = np.linspace(-np.pi,np.pi,1000)
whys = potential(exes)
plt.plot(exes, whys, linewidth=2, color='k')
plt.xlim([-halfwidth, halfwidth])
plt.ylim([0,4])
mvca = MVCA.from_msm(msm_mdl, n_macrostates=None, get_linkage=True)
scipy.cluster.hierarchy.dendrogram(mvca.linkage,
color_threshold=1.1,
no_labels=True)
plt.show()
for i in range(19):
s = str(i+1)
plt.scatter([i+1], mvca.elbow_data[i], color='k', marker=r'$%s$'%(i+1),
s=60*(np.floor((i+1)/10)+1)) # so numbers are approximately the same size
plt.xlabel('Number of macrostates')
plt.xticks([])
plt.show()
color_list = ['deepskyblue', 'hotpink', 'turquoise', 'indigo', 'gold',
'olivedrab', 'orangered', 'whitesmoke']
def plot_macrostates(n_macrostates=4):
mvca_mdl = MVCA.from_msm(msm_mdl, n_macrostates=n_macrostates)
for i, _ in enumerate(mvca_mdl.microstate_mapping_):
m,s,b = plt.stem([ccs[i]], [nrgs[i]],
color_list[mvca_mdl.microstate_mapping_[i]],
markerfmt=' ', bottom=-1)
for i in s:
i.set_linewidth(5)
plt.plot(exes, whys, color='black', linewidth=1.5)
plt.ylim([0,4])
plt.xlim([-halfwidth,halfwidth])
plt.subplots(nrows=2, ncols=3, figsize=(12,6))
for i in range(6):
plt.subplot(2,3,i+1)
plot_macrostates(n_macrostates=i+2)
plt.title('%i macrostates'%(i+2))
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the dataset
Step2: Define a regular spatial clusterer
Step3: Plot our MSM energies
Step4: Make a model without macrostating to get linkage information
Step5: Use mvca.linkage to get a scipy linkage object
Step6: Use mvca.elbow_data to get the objective function change with agglomeration
Step7: Plot some macrostate models
|
5,469
|
<ASSISTANT_TASK:>
Python Code:
# flipping signs of numbers...
a = 5
b = -5
print(-a, -b)
# len function
x1 = []
x2 = "12"
x3 = [1,2,3]
print(len(x1), len(x2), len(x3))
x = [1,2,3]
print(x[100]) # <--- IndexError! 100 is waayyy out of bounds
string = "hello"
print(string[0]) # first item
print(string[len(string)-1]) # last item
a_string = "Hello"
# indexing first item...
print(a_string[0]) # Readable
print(a_string[-len(a_string)]) # Less readable
print(a_string[-1]) # Readable
print(a_string[len(a_string)-1]) # Less readable
print(a_string[4]) # Avoid this wherever possible! BAD BAD BAD!!
a_list = ["qwerty", "dave", "magic johnson", "qwerty"]
a_string = "Helllllllo how ya doin fam?"
# notice that Python returns the index of the first match.
print(a_list.index("qwerty"))
print(a_string.index("l"))
# if item is not in the list, you get an value error:
print(a_list.index("chris"))
a_list = [1,2,3]
print(a_list)
a_list[-1] = "a"
print(a_list)
a_list[0] = "c"
print(a_list)
a_list[1] = "b"
print(a_list)
a_string = "123"
a_string[0] = "a" # <-- Error; strings are an "immutable" data type in Python.
a_string = "123"
a_string = "a" + a_string[1:] # slicing, see below.
print(a_string)
this_is_insane = [ [[[[[[[[[[[[100]]]]]]]]]]]] ] # WTF !!??
print(this_is_insane[0][0][0][0][0][0][0][0][0][0][0][0][0])
grid = [ ["0"] * 5 for _ in range(5) ] # building a nested list, in style. 'List Comprehensions' are not covered in this course.
print("The Grid looks like this...:", grid[2:], "\n")
# Note: "grid[2:]" above is a 'slice' (more on slicing below), in this case I'm using slicing to truncate the results,
# observe that three lists get printed, not five.
def print_grid():
    """This function simply prints grid, row by row."""
for row in grid: # This is a for-loop, more on these later!
print(row)
print_grid()
print("\n")
grid[0][0] = "X" # Top-left corner
grid[0][-1] = "Y" # Top-right corner
grid[-1][0] = "W" # Bottom-left corner
grid[-1][-1] = "Z" # Bottom-right corner
grid[2][2] = "A" # Somewhere near the middle
print_grid()
# Quick note, since the corners index are defined by 0 and -1, these numbers should work for all nxn grids.
lst = list(range(1,21)) # list(range) just makes a list of numbers 1 to 20
# The below function just makes it faster for me to type out the test cases below.
def printer(start, end, lst):
    """Helper function, takes two integers (start, end) and a list/string.
    Returns a formatted string that contains: start, end and lst[start:end]."""
if start:
if end:
sliced = lst[start:end]
else:
sliced = lst[start:]
elif end:
sliced = lst[:end]
else:
sliced = lst[:]
return "slice is '[{}:{}]', which returns: {}".format(start, end, sliced)
print("STARTING LIST IS:", lst)
print("")
# Test cases
print("SLICING LISTS...")
print(printer("","", lst)) # [:] is sometimes called a 'shallow copy' of a list.
print(printer("", 5, lst )) # first 5 items.
print(printer(14,"", lst)) # starting at index 14, go to the end.
print(printer(200,500,lst)) # No errors for indexes that should be "out of bounds".
print(printer(5,10, lst))
print(printer(4,5, lst))
# Negative numbers work too. In the case below we start at the 5th last item and move toward the 2nd to last item.
print(printer(-5,-2, lst))
print(printer(-20,-1, lst)) # note that this list finishes at 19, not 20.
# and for good measure, a few strings:
print("\nSLICING STRINGS...")
a_string = "Hello how are you?"
print(printer("","", a_string)) # The whole string aka a 'shallow copy'
print(printer(0,5, a_string))
print(printer(6,9, a_string))
print(printer(10,13, a_string))
print(printer(14, 17, a_string))
print(printer(17, "", a_string))
a_list = list(range(1,21))
sliced_list = a_list[15:19:2]
print(sliced_list)
print(a_list[17])
a_list = list(range(0,206))
slice1 = a_list[::10] # every 10th element starting from zero = [0, 10, 20, ...]
slice2 = a_list[5::10] # every 10th element starting from 5 = [5, 15, 25,...]
a_string = "a123a123a123a123a123a123a123" # this pattern has a period of 4.
slice3 = a_string[::4] # starts at a, returns aaaaaa
slice4 = a_string[3::4] # starts at 3, returns 333333
print(slice1, slice2, slice3, slice4, sep="\n")
a_list = list(range(1, 11))
print(a_list)
print(a_list[::-1]) # reverses the list
list_1 = list(range(1,21))
list_1 = list_1[2::3]
print(list_1)
# The above 3 lines can be refactored to:
list_2 = list(range(3, 21, 3))
print(list_2)
list_3 = list(range(10, -1, -1)) # this says: "start at the number 10 and count backwards to 0
# please remember that start points are inclusive BUT endpoints are exclusive,
# if we want to include 0 in the results we must have an endpoint +1 of our target.
# in this case the number one past zero (when counting backwards) is -1.
print(list_3)
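# Added check: counting backwards with range gives the same result as building
# the forward list and reversing it with a [::-1] slice.
print(list_3 == list(range(0, 11))[::-1]) # True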
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, those bounds I have just given might sound a bit arbitrary, but actually I can explain exactly how they work. Consider the following picture
Step2: So that explains the first row of numbers in the image. What about the second row? Well, in Python not only can you index forwards you can also index backwards.
Step3: You might wonder what is wrong with index[4] to reference the end of the list.
Step4: What can we do with indexing?
Step5: Can we change the values inside strings?
Step6: In python strings are immutable, which is a fancy way of saying that they are set in stone; once created you just can't change them. Your only option is to create new strings with data you want.
Step7: Making Grids
Step9: To index a list inside a list the syntax is to add another [{integer}] on the end. Repeat until you get to the required depth.
Step11: Anyway, thats enough about indexing for now, let's move onto the topic of slicing...
Step12: Alright, so that's the basics of slicing covered, the only remaining question is what the final "step" argument does. Well basically, the step allows us to 'skip' every nth element of the list/string.
Step13: How does this work? Well, index 15 is the number 16 (remember we count from 0 in Python), and then we skip index 16 (an odd number) and go straight to index 17 (which is the number 18). The next index to look at is 20, but since that is larger than our end step (19) we terminate.
Step14: In both of the above cases we are using a step of size 10. If we start at 0 that means we get 0, 10, 20, and so on; starting at 5 gives 5, 15, 25, and so on.
Step15: The Range Function
Step16: You will note a small difference between the two ways of doing things. When we slice we start the the count at 2 whereas with range we start the count at 3. The difference is the result of the fact the range function is dealing with numbers, whereas the slice is using indexing (e.g. list_1[2] is the number 3).
|
5,470
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from pandas import DataFrame
url="https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data"
df = pd.read_csv(url,header=None)
df.describe()
pd.options.display.max_columns=70
df.describe()
import numpy as np
import pylab
import scipy.stats as stats
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
%matplotlib inline
stats.probplot(df[4], dist="norm", plot=pylab)
pylab.show()
df[60].unique()
df.corr()
import matplotlib.pyplot as plot
plot.pcolor(df.corr())
plot.show()
df.corr()[0].plot()
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
import pandas as pd
from pandas import DataFrame
w_df = pd.read_csv(url,header=0,sep=';')
w_df.describe()
w_df['volatile acidity']
w_df.corr()
import matplotlib.pyplot as plot
plot.pcolor(w_df.corr())
plot.show()
w_df.corr()['fixed acidity'].plot()
from pandas.tools.plotting import scatter_matrix
p=scatter_matrix(w_df, alpha=0.2, figsize=(12, 12), diagonal='kde')
import numpy as np
import pylab
import scipy.stats as stats
%matplotlib inline
stats.probplot(w_df['alcohol'], dist="norm", plot=pylab)
pylab.show()
import numpy
import random
from sklearn import datasets, linear_model
from sklearn.metrics import roc_curve, auc
import pylab as pl
import pandas as pd
from pandas import DataFrame
url="https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data"
df = pd.read_csv(url,header=None)
df.describe()
df[60]=np.where(df[60]=='R',0,1)
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size = 0.3)
x_train = train.iloc[0:,0:60]
y_train = train[60]
x_test = test.iloc[0:,0:60]
y_test = test[60]
y_train
model = linear_model.LinearRegression()
model.fit(x_train,y_train)
training_predictions = model.predict(x_train)
print(np.mean((training_predictions - y_train) ** 2))
print('Train R-Square:',model.score(x_train,y_train))
print('Test R-Square:',model.score(x_test,y_test))
print(max(training_predictions),min(training_predictions),np.mean(training_predictions))
def confusion_matrix(predicted, actual, threshold):
if len(predicted) != len(actual): return -1
tp = 0.0
fp = 0.0
tn = 0.0
fn = 0.0
for i in range(len(actual)):
if actual[i] > 0.5: #labels that are 1.0 (positive examples)
if predicted[i] > threshold:
tp += 1.0 #correctly predicted positive
else:
fn += 1.0 #incorrectly predicted negative
else: #labels that are 0.0 (negative examples)
if predicted[i] < threshold:
tn += 1.0 #correctly predicted negative
else:
fp += 1.0 #incorrectly predicted positive
rtn = [tp, fn, fp, tn]
return rtn
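# Added sanity check on a tiny hand-made example (threshold 0.5):
# actual = [1, 1, 0, 0], scores = [0.9, 0.2, 0.8, 0.1] -> tp=1, fn=1, fp=1, tn=1
print(confusion_matrix([0.9, 0.2, 0.8, 0.1], [1, 1, 0, 0], 0.5))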
testing_predictions = model.predict(x_test)
confusion_matrix(testing_predictions,np.array(y_test),0.5)
cm = confusion_matrix(testing_predictions,np.array(y_test),0.5)
misclassification_rate = (cm[1] + cm[2])/len(y_test)
misclassification_rate
[tp, fn, fp, tn] = confusion_matrix(testing_predictions,np.array(y_test),0.5)
precision = tp/(tp+fp)
recall = tp/(tp+fn)
f_score = 2 * (precision * recall)/(precision + recall)
print(precision,recall,f_score)
[tp, fn, fp, tn] = confusion_matrix(testing_predictions,np.array(y_test),0.9)
precision = tp/(tp+fp)
recall = tp/(tp+fn)
f_score = 2 * (precision * recall)/(precision + recall)
print(precision,recall,f_score)
positives = list()
negatives = list()
actual = np.array(y_train)
for i in range(len(y_train)):
if actual[i]:
positives.append(training_predictions[i])
else:
negatives.append(training_predictions[i])
df_p = pd.DataFrame(positives)
df_n = pd.DataFrame(negatives)
fig, ax = plt.subplots()
a_heights, a_bins = np.histogram(df_p)
b_heights, b_bins = np.histogram(df_n, bins=a_bins)
width = (a_bins[1] - a_bins[0])/3
ax.bar(a_bins[:-1], a_heights, width=width, facecolor='cornflowerblue')
ax.bar(b_bins[:-1]+width, b_heights, width=width, facecolor='seagreen')
positives = list()
negatives = list()
actual = np.array(y_test)
for i in range(len(y_test)):
if actual[i]:
positives.append(testing_predictions[i])
else:
negatives.append(testing_predictions[i])
df_p = pd.DataFrame(positives)
df_n = pd.DataFrame(negatives)
fig, ax = plt.subplots()
a_heights, a_bins = np.histogram(df_p)
b_heights, b_bins = np.histogram(df_n, bins=a_bins)
width = (a_bins[1] - a_bins[0])/3
ax.bar(a_bins[:-1], a_heights, width=width, facecolor='cornflowerblue')
ax.bar(b_bins[:-1]+width, b_heights, width=width, facecolor='seagreen')
from sklearn.metrics import roc_curve, auc
(fpr, tpr, thresholds) = roc_curve(y_train,training_predictions)
area = auc(fpr,tpr)
pl.clf() #Clear the current figure
pl.plot(fpr,tpr,label="In-Sample ROC Curve with area = %1.2f"%area)
pl.plot([0, 1], [0, 1], 'k') #This plots the random (equal probability line)
pl.xlim([0.0, 1.0])
pl.ylim([0.0, 1.0])
pl.xlabel('False Positive Rate')
pl.ylabel('True Positive Rate')
pl.title('In sample ROC rocks versus mines')
pl.legend(loc="lower right")
pl.show()
(fpr, tpr, thresholds) = roc_curve(y_test,testing_predictions)
area = auc(fpr,tpr)
pl.clf() #Clear the current figure
pl.plot(fpr,tpr,label="Out-Sample ROC Curve with area = %1.2f"%area)
pl.plot([0, 1], [0, 1], 'k')
pl.xlim([0.0, 1.0])
pl.ylim([0.0, 1.0])
pl.xlabel('False Positive Rate')
pl.ylabel('True Positive Rate')
pl.title('Out sample ROC rocks versus mines')
pl.legend(loc="lower right")
pl.show()
(fpr, tpr, thresholds)
cm = confusion_matrix(testing_predictions,np.array(y_test),.1)
cost1 = 1000*cm[0] + 300 * cm[2] + 200 * cm[1] + 200 * cm[3]
cm = confusion_matrix(testing_predictions,np.array(y_test),.9)
cost2 = 1000*cm[0] + 300 * cm[2] + 200 * cm[1] + 200 * cm[3]
print(cost1,cost2)
cm = confusion_matrix(testing_predictions,np.array(y_test),.1)
cost1 = 0*cm[0] + 0 * cm[2] + 5000 * cm[1] + 0 * cm[3]
cm = confusion_matrix(testing_predictions,np.array(y_test),.9)
cost2 = 0*cm[0] + 0 * cm[2] + 5000 * cm[1] + 0 * cm[3]
print(cost1,cost2)
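# Added sketch: sweep the decision threshold and watch the two error types trade
# off; pick the threshold that best matches whatever cost model applies.
thresholds_grid = np.linspace(0.0, 1.0, 101)
fns = []
fps = []
for th in thresholds_grid:
    tp, fn, fp, tn = confusion_matrix(testing_predictions, np.array(y_test), th)
    fns.append(fn)
    fps.append(fp)
pl.clf()
pl.plot(thresholds_grid, fns, label='false negatives (missed mines)')
pl.plot(thresholds_grid, fps, label='false positives (false alarms)')
pl.xlabel('Decision threshold')
pl.ylabel('Count on test set')
pl.legend(loc='best')
pl.show()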
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h4>See all columns</h4>
Step2: <h4>Examine the distribution of the data in column 4</h4>
Step3: <h4>Examine the dependent variable</h4>
Step4: <h4>Examine correlations</h4>
Step5: <h4>Highly correlated items = not good!</h4>
Step6: <h3>Examining the correlation of one variable with the others</h3>
Step7: <h3>Pandas scatter matrix function helps visualize the relationship between features</h3>
Step8: <h3>And we can examine quintile plots as we did with the rocks and mines data</h3>
Step9: <h2>Training a classifier on Rocks vs Mines</h2>
Step10: <h4>Convert labels R and M to 0 and 1</h4>
Step11: <h4>Divide the dataset into training and test samples</h4>
Step12: <h2>Build the model and fit the training data</h2>
Step13: <h1>Interpreting categorical prediction results</h1>
Step14: <h3>These are horrible!</h3>
Step15: <h2>We want to predict categories
Step16: <h3>Misclassification rate = (fp + fn)/number of cases</h3>
Step17: <h3>Precision and Recall</h3>
Step18: <h2>Confusion matrix (and hence precision, recall etc.) depend on the selected threshold</h2>
Step19: <h2>ROC
Step20: <h3>Repeat for the holdout sample</h3>
Step21: <h2>Drawing the ROC Curve</h2>
Step22: <h4>In-sample ROC Curve</h4>
Step23: <h4>Out-sample ROC curve</h4>
Step24: <h2>So, what threshold should we actually use?</h2>
Step25: <h3>Example
|
5,471
|
<ASSISTANT_TASK:>
Python Code:
import gdsfactory as gf
c = gf.Component("pads")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((70, 200))
c
c = gf.Component("pads_with_routes_with_bends")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((70, 200))
route = gf.routing.get_route_electrical(
pt.ports["e11"], pb.ports["e11"], bend="bend_euler", radius=30
)
c.add(route.references)
c
c = gf.Component("pads_with_routes_with_wire_corners")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((70, 200))
route = gf.routing.get_route_electrical(
pt.ports["e11"], pb.ports["e11"], bend="wire_corner"
)
c.add(route.references)
c
c = gf.Component("pads_with_routes_with_wire_corners_no_orientation")
pt = c << gf.components.pad_array(orientation=None, columns=3)
pb = c << gf.components.pad_array(orientation=None, columns=3)
pt.move((70, 200))
route = gf.routing.get_route_electrical(
pt.ports["e11"], pb.ports["e11"], bend="wire_corner"
)
c.add(route.references)
c
c = gf.Component("pads_route_quad")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((100, 200))
route = gf.routing.route_quad(pt.ports["e11"], pb.ports["e11"], layer=(49, 0))
c.add(route)
c
c = gf.Component("pads_route_from_steps")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((100, 200))
route = gf.routing.get_route_from_steps(
pb.ports["e11"],
pt.ports["e11"],
steps=[
{"y": 200},
],
cross_section=gf.cross_section.metal3,
bend=gf.components.wire_corner,
)
c.add(route.references)
c
c = gf.Component("pads_route_from_steps_None_orientation")
pt = c << gf.components.pad_array(orientation=None, columns=3)
pb = c << gf.components.pad_array(orientation=None, columns=3)
pt.move((100, 200))
route = gf.routing.get_route_from_steps(
pb.ports["e11"],
pt.ports["e11"],
steps=[
{"y": 200},
],
cross_section=gf.cross_section.metal3,
bend=gf.components.wire_corner,
)
c.add(route.references)
c
import gdsfactory as gf
c = gf.Component("pads_bundle")
pt = c << gf.components.pad_array(orientation=270, columns=3)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((100, 200))
routes = gf.routing.get_bundle_electrical(
pb.ports, pt.ports, end_straight_length=60, separation=30
)
for route in routes:
c.add(route.references)
c
c = gf.Component("pads_bundle_steps")
pt = c << gf.components.pad_array(
gf.partial(gf.components.pad, size=(30, 30)),
orientation=270,
columns=3,
spacing=(50, 0),
)
pb = c << gf.components.pad_array(orientation=90, columns=3)
pt.move((300, 500))
routes = gf.routing.get_bundle_from_steps_electrical(
pb.ports, pt.ports, end_straight_length=60, separation=30, steps=[{"dy": 100}]
)
for route in routes:
c.add(route.references)
c
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: route_quad
Step2: get_route_from_steps
Step3: Bundle of routes (get_bundle_electrical)
Step4: get bundle from steps
|
5,472
|
<ASSISTANT_TASK:>
Python Code:
def win_series(p, W=0, L=0):
"Probability of winning best-of-7 series, given a probability p of winning a game."
return (1 if W == 4 else
0 if L == 4 else
p * win_series(p, W + 1, L) +
(1 - p) * win_series(p, W, L + 1))
win_series(0.58)
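# Added check: verify the closed-form series probability with a quick Monte
# Carlo simulation of best-of-7 series at p = 0.58.
import random
def simulate_series(p, n=100000):
    wins = 0
    for _ in range(n):
        W = L = 0
        while W < 4 and L < 4:
            if random.random() < p:
                W += 1
            else:
                L += 1
        wins += (W == 4)
    return wins / n
simulate_series(0.58)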
def percents(items, fmt='{:4.0%}'): return ' '.join(fmt.format(item) for item in items)
def series_table(pcts=[p/100 for p in range(20, 81, 5)]):
    print('W-L | Single Game Win Percentage')
print(' | ' + percents(pcts))
for W in range(4):
print('----+' + '-' * 5 * len(pcts))
for L in reversed(range(4)):
results = [win_series(p, W, L) for p in pcts]
print('{}-{} | {}'.format(W, L, percents(results)))
series_table()
def playoffs(name, rounds):
"Print probability for team winning each series."
overall = (1, 1, 1) # (lo, med, hi) probabilities of winning it all
for (opponent, probs) in rounds:
this_round = [win_series(p) for p in probs]
overall = [overall[i] * this_round[i] for i in range(len(probs))]
print('{} vs {:8} win this round: {}; win through here: {}'.format(
name, opponent, percents(this_round), percents(overall)))
playoffs('Warriors',
[('Rockets', (0.75, 0.83, 0.85)),
('Clippers', (0.67, 0.73, 0.80)),
('Spurs', (0.45, 0.58, 0.70)),
('Cavs', (0.60, 0.67, 0.75))])
playoffs('Spurs',
[('Memphis', (0.75, 0.83, 0.85)),
('Thunder', (0.45, 0.62, 0.70)),
('Warriors', (0.30, 0.42, 0.55)),
('Cavs', (0.60, 0.67, 0.75))])
playoffs('Cavs',
[('Pistons', (0.75, 0.83, 0.85)),
('Hawks', (0.45, 0.60, 0.75)),
('Raptors', (0.40, 0.55, 0.65)),
('GSW/SAS', (0.25, 0.33, 0.40))])
def playoffs(name, rounds):
"Print probability for team winning each series."
overall = (1, 1, 1) # (lo, med, hi) probabilities of winning it all
for (opponent, probs, *args) in rounds:
this_round = [win_series(p, *args) for p in probs]
overall = [overall[i] * this_round[i] for i in range(len(probs))]
print('{} vs {:8} win this round: ({}) win through here: ({})'.format(
name, opponent, percents(this_round), percents(overall)))
playoffs('Warriors',
[('Rockets', (0.50, 0.70, 0.80), 3, 1),
('Blazers', (0.45, 0.55, 0.67)),
('Spurs', (0.30, 0.55, 0.67)),
('Cavs', (0.40, 0.60, 0.70))])
playoffs('Spurs',
[('Memphis', (0.75, 0.83, 0.85), 4, 0),
('Thunder', (0.45, 0.62, 0.70)),
('Warriors', (0.33, 0.45, 0.70)),
('Cavs', (0.60, 0.67, 0.75))])
playoffs('Cavs',
[('Pistons', (0.75, 0.83, 0.85), 4, 0),
('Hawks', (0.45, 0.60, 0.75)),
('Raptors', (0.40, 0.55, 0.65)),
('GSW/SAS', (0.30, 0.40, 0.60))])
playoffs('Warriors',
[('Rockets', (0.50, 0.70, 0.80), 4, 1),
('Blazers', (0.55, 0.67, 0.75), 3, 1),
('Spurs', (0.45, 0.60, 0.67)),
('Cavs', (0.40, 0.55, 0.67))])
playoffs('Spurs',
[('Memphis', (0.75, 0.83, 0.85), 4, 0),
('Thunder', (0.40, 0.60, 0.70), 2, 3),
('Warriors', (0.33, 0.40, 0.55)),
('Cavs', (0.40, 0.50, 0.70))])
playoffs('Thunder',
[('Dallas', (0.75, 0.83, 0.85), 4, 1),
('Spurs', (0.30, 0.40, 0.60), 3, 2),
('Warriors', (0.33, 0.40, 0.55)),
('Cavs', (0.35, 0.45, 0.60))])
playoffs('Cavs',
[('Pistons', (0.75, 0.83, 0.85), 4, 0),
('Hawks', (0.45, 0.60, 0.75), 4, 0),
('Raptors', (0.50, 0.65, 0.75)),
('GS/SA/OK', (0.33, 0.45, 0.55))])
playoffs('Warriors',
[('Rockets', (0.50, 0.70, 0.80), 4, 1),
('Blazers', (0.55, 0.67, 0.75), 4, 1),
('Thunder', (0.45, 0.63, 0.70), 0, 1),
('Cavs', (0.40, 0.55, 0.65))])
playoffs('Warriors',
[('Rockets', (0.50, 0.70, 0.80), 4, 1),
('Blazers', (0.55, 0.67, 0.75), 4, 1),
('Thunder', (0.45, 0.63, 0.70), 1, 1),
('Cavs', (0.40, 0.55, 0.65))])
playoffs('Cavs',
[('Pistons', (0.75, 0.83, 0.85), 4, 0),
('Hawks', (0.45, 0.60, 0.75), 4, 0),
('Raptors', (0.50, 0.65, 0.75), 1, 0),
('GSW', (0.35, 0.45, 0.60))])
playoffs('Warriors',
[('Rockets', (0.50, 0.70, 0.80), 4, 1),
('Blazers', (0.55, 0.67, 0.75), 4, 1),
('Thunder', (0.25, 0.55, 0.65), 1, 3),
('Cavs', (0.40, 0.55, 0.65))])
playoffs('Cavs',
[('Pistons', (0.75, 0.83, 0.85), 4, 0),
('Hawks', (0.45, 0.60, 0.75), 4, 0),
('Raptors', (0.50, 0.55, 0.70), 2, 2),
('Thunder', (0.35, 0.45, 0.60))])
playoffs('Thunder',
[('Dallas', (0.75, 0.83, 0.85), 4, 1),
('Spurs', (0.30, 0.40, 0.60), 4, 2),
('Warriors', (0.35, 0.45, 0.75), 3, 1),
('Cavs', (0.40, 0.55, 0.65))])
playoffs('Warriors',
[('Rockets', (0.50, 0.70, 0.80), 4, 1),
('Blazers', (0.55, 0.67, 0.75), 4, 1),
('Thunder', (0.35, 0.55, 0.65), 2, 3),
('Cavs', (0.40, 0.55, 0.65))])
playoffs('Cavs',
[('Pistons', (0.75, 0.83, 0.85), 4, 0),
('Hawks', (0.45, 0.60, 0.75), 4, 0),
('Raptors', (0.50, 0.55, 0.70), 3, 2),
('Thunder', (0.35, 0.45, 0.60))])
playoffs('Thunder',
[('Dallas', (0.75, 0.83, 0.85), 4, 1),
('Spurs', (0.30, 0.40, 0.60), 4, 2),
('Warriors', (0.35, 0.45, 0.75), 3, 2),
('Cavs', (0.40, 0.55, 0.65))])
series_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In other words, if you have a 58% chance of winning a game, you have a 67% chance of winning the series.
Step2: And here's a function to tabulate the chances of winning each series on the way to a title
Step3: Now I enter my subjective probability estimates (low, medium, and high), and likely opponents for each round, for the three top contenders
Step4: I have the Warriors at 50% (for the medium estimate of winning it all) and the Spurs at 20%, so I'm more of a Warriors fan than fivethirtyeight and basketball-reference, but I have very wide margins between my low and high estimate
Step5: We don't know for sure how long Curry will be out, but here are my updated odds for the Warriors, with the middle probability value representing the assumption that Curry misses the second round, and comes back in time for the Western Conference Finals at a mildly reduced capacity; the low and high probability values represent more and less severe injuries
Step6: The Spurs and Cavs are rolling; let's update their odds
Step7: So my updated odds are that the Warriors and Spurs are roughly equally likely to win (26% and 24%); the Cavs are still less likely (13%), and there is more uncertainty.
Step8: So overall, from the start of the playoffs up to May 10th, I have
Step9: Not Yet?
Step10: Yet!
Step11: But Not Done Yet
Step12: The Finals
|
5,473
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License")
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
!pip install --quiet -U apache-beam
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
' 🍓Strawberry \n',
' 🥕Carrot \n',
' 🍆Eggplant \n',
' 🍅Tomato \n',
' 🥔Potato \n',
])
| 'Strip' >> beam.Map(str.strip)
| beam.Map(print)
)
import apache_beam as beam
def strip_header_and_newline(text):
return text.strip('# \n')
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'# 🍓Strawberry\n',
'# 🥕Carrot\n',
'# 🍆Eggplant\n',
'# 🍅Tomato\n',
'# 🥔Potato\n',
])
| 'Strip header' >> beam.Map(strip_header_and_newline)
| beam.Map(print)
)
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'# 🍓Strawberry\n',
'# 🥕Carrot\n',
'# 🍆Eggplant\n',
'# 🍅Tomato\n',
'# 🥔Potato\n',
])
| 'Strip header' >> beam.Map(lambda text: text.strip('# \n'))
| beam.Map(print)
)
import apache_beam as beam
def strip(text, chars=None):
return text.strip(chars)
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'# 🍓Strawberry\n',
'# 🥕Carrot\n',
'# 🍆Eggplant\n',
'# 🍅Tomato\n',
'# 🥔Potato\n',
])
| 'Strip header' >> beam.Map(strip, chars='# \n')
| beam.Map(print)
)
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
('🍓', 'Strawberry'),
('🥕', 'Carrot'),
('🍆', 'Eggplant'),
('🍅', 'Tomato'),
('🥔', 'Potato'),
])
| 'Format' >> beam.MapTuple(
lambda icon, plant: '{}{}'.format(icon, plant))
| beam.Map(print)
)
import apache_beam as beam
with beam.Pipeline() as pipeline:
chars = pipeline | 'Create chars' >> beam.Create(['# \n'])
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'# 🍓Strawberry\n',
'# 🥕Carrot\n',
'# 🍆Eggplant\n',
'# 🍅Tomato\n',
'# 🥔Potato\n',
])
| 'Strip header' >> beam.Map(
lambda text, chars: text.strip(chars),
chars=beam.pvalue.AsSingleton(chars),
)
| beam.Map(print)
)
import apache_beam as beam
with beam.Pipeline() as pipeline:
chars = pipeline | 'Create chars' >> beam.Create(['#', ' ', '\n'])
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'# 🍓Strawberry\n',
'# 🥕Carrot\n',
'# 🍆Eggplant\n',
'# 🍅Tomato\n',
'# 🥔Potato\n',
])
| 'Strip header' >> beam.Map(
lambda text, chars: text.strip(''.join(chars)),
chars=beam.pvalue.AsIter(chars),
)
| beam.Map(print)
)
import apache_beam as beam
def replace_duration(plant, durations):
plant['duration'] = durations[plant['duration']]
return plant
with beam.Pipeline() as pipeline:
durations = pipeline | 'Durations' >> beam.Create([
(0, 'annual'),
(1, 'biennial'),
(2, 'perennial'),
])
plant_details = (
pipeline
| 'Gardening plants' >> beam.Create([
{'icon': '🍓', 'name': 'Strawberry', 'duration': 2},
{'icon': '🥕', 'name': 'Carrot', 'duration': 1},
{'icon': '🍆', 'name': 'Eggplant', 'duration': 2},
{'icon': '🍅', 'name': 'Tomato', 'duration': 0},
{'icon': '🥔', 'name': 'Potato', 'duration': 2},
])
| 'Replace duration' >> beam.Map(
replace_duration,
durations=beam.pvalue.AsDict(durations),
)
| beam.Map(print)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Map
Step2: Examples
Step3: (truncated HTML table removed)
Step4: (truncated HTML table removed)
Step5: (truncated HTML table removed)
Step6: (truncated HTML table removed)
Step7: (truncated HTML table removed)
Step8: (truncated HTML table removed)
Step9: (truncated HTML table removed)
|
5,474
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title Set up open-source environment
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
#@title Import packages
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
np.random.seed(0)
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
NUM_CLIENTS = 10
NUM_EPOCHS = 5
SHUFFLE_BUFFER = 100
def preprocess(dataset):
def map_fn(element):
return collections.OrderedDict(
x=tf.reshape(element['pixels'], [-1, 784]),
y=tf.reshape(element['label'], [-1, 1]))
return dataset.repeat(NUM_EPOCHS).shuffle(SHUFFLE_BUFFER, seed=1).map(map_fn)
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
preprocessed_example_dataset = preprocess(example_dataset)
class TestDataBackend(tff.framework.DataBackend):
async def materialize(self, data, type_spec):
client_id = int(data.uri[-1])
client_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[client_id])
return preprocess(client_dataset)
def ex_fn(
device: tf.config.LogicalDevice) -> tff.framework.DataExecutor:
return tff.framework.DataExecutor(
tff.framework.EagerTFExecutor(device),
data_backend=TestDataBackend())
factory = tff.framework.local_executor_factory(leaf_executor_fn=ex_fn)
ctx = tff.framework.ExecutionContext(executor_fn=factory)
tff.framework.set_default_context(ctx)
def create_keras_model():
return tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
def model_fn():
keras_model = create_keras_model()
return tff.learning.from_keras_model(
keras_model,
input_spec=preprocessed_example_dataset.element_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
iterative_process = tff.learning.build_federated_averaging_process(
model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
state = iterative_process.initialize()
element_type = tff.types.StructWithPythonType(
preprocessed_example_dataset.element_spec,
container_type=collections.OrderedDict)
dataset_type = tff.types.SequenceType(element_type)
data_uris = [f'uri://{i}' for i in range(5)]
data_handle = tff.framework.CreateDataDescriptor(arg_uris=data_uris, arg_type=dataset_type)
state, metrics = iterative_process.next(state, data_handle)
print('round 1, metrics={}'.format(metrics))
NUM_ROUNDS = 11
for round_num in range(2, NUM_ROUNDS):
state, metrics = iterative_process.next(state, data_handle)
print('round {:2d}, metrics={}'.format(round_num, metrics))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Remote Data in TFF
Step2: Preparing the input data
Step3: We'll construct a preprocessing function to transform the raw examples in the EMNIST dataset from 28x28 images into 784-element arrays. Additionally, the function will shuffle the individual examples, and rename the features from pixels and label, to x and y for use with Keras. We also throw in a repeat over the data set to run several epochs.
Step4: Let's verify this works
Step5: We'll use the EMNIST dataset to train a model by loading and preprocessing individual clients (emulating distinct partitions) through an implementation of DataBackend.
Step6: Plugging the DataBackend into the ExecutionContext
Step7: Training the model
Step8: We can pass this TFF-wrapped definition of our model
Step9: The initialize computation returns the initial state of the iterative process.
Step10: Now we can run a round of training
Step11: And we can run a few more rounds
|
5,475
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.datasets import load_iris
iris_data = load_iris()
iris_data
type(iris_data)
dir(iris_data)
print(iris_data['DESCR'])
print(iris_data['filename'])
print(iris_data['feature_names'])
print(iris_data['target_names'])
print(iris_data['target'])
iris_data['data']
print(len(iris_data['data']))
print(len(iris_data['target']))
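# Small added sanity check: each row of data should have exactly one target value.
assert len(iris_data['data']) == len(iris_data['target'])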
import pandas as pd
import numpy as np
iris_df = pd.DataFrame(
data=np.append(
iris_data['data'],
np.array(iris_data['target']).reshape(len(iris_data['target']), 1),
axis=1),
columns=np.append(iris_data['feature_names'], ['species'])
)
iris_df.sample(n=10)
iris_df['species'] = iris_df['species'].astype('int64')
iris_df.sample(n=10)
# Your answer goes here
from sklearn.datasets import fetch_california_housing
housing_data = fetch_california_housing()
type(housing_data)
dir(housing_data)
print(housing_data['target'][:10])
import pandas as pd
import numpy as np
housing_df = pd.DataFrame(
data=np.append(
housing_data['data'],
np.array(housing_data['target']).reshape(len(housing_data['target']), 1),
axis=1),
columns=np.append(housing_data['feature_names'], ['price'])
)
housing_df.sample(n=10)
from sklearn.datasets import make_regression
features, targets = make_regression(n_samples=10, n_features=1, random_state=42)
features, targets
import matplotlib.pyplot as plt
plt.plot(features, targets, 'b.')
plt.show()
from sklearn.datasets import make_regression
features, targets = make_regression(n_samples=10, n_features=1, random_state=42, noise=5.0)
plt.plot(features, targets, 'b.')
plt.show()
# Your answer goes here
# Your answer goes here
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(features, targets)
predictions = regression.predict(features)
plt.plot(features, targets, 'b.')
plt.plot(features, predictions, 'r-')
plt.show()
features
from sklearn.preprocessing import MinMaxScaler
transformer = MinMaxScaler()
transformer.fit(features)
transformer.data_min_, transformer.data_max_
features = transformer.transform(features)
features
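# Added aside (not in the original tutorial): the scaling is invertible, so the
# original feature values can be recovered with inverse_transform.
transformer.inverse_transform(features)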
from sklearn.pipeline import Pipeline
features, targets = make_regression(
n_samples=10, n_features=1, random_state=42, noise=5.0)
pipeline = Pipeline([
('scale', MinMaxScaler()),
('regression', LinearRegression())
])
pipeline.fit(features, targets)
predictions = pipeline.predict(features)
plt.plot(features, targets, 'b.')
plt.plot(features, predictions, 'r-')
plt.show()
from sklearn.metrics import mean_squared_error
mean_squared_error(targets, predictions)
print(regression.score(features, targets))
print(pipeline.score(features, targets))
# Your answer goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to scikit-learn
Step2: That's a lot to take in. Let's examine this loaded data a little more closely. First we'll see what data type this dataset is
Step3: sklearn.utils.Bunch is a type that you'll see quite often when working with datasets built into scikit-learn. It is a dictionary-like container for feature and target data within a dataset.
Step4: DESCR is a description of the dataset.
Step5: filename is the name of the source file where the data is stored.
Step6: feature_names is the name of the feature columns.
Step7: target_names, despite the name, is not the names of the target columns. There is only one column of targets.
Step8: We can now examine target and see that it contains zeros, ones, and twos. These correspond to the target names 'setosa', 'versicolor', and 'virginica'.
Step9: Last, we'll look at the data within the bunch. The data is an array of arrays. Each sub-array contains four values. These values match up with the feature_names. The first item in each sub-array is 'sepal length (cm)', the next is 'sepal width (cm)', and so on.
Step10: The number of target values should always equal the number of rows in the data.
Step11: Bunch objects are an adequate container for data. They can be used directly to feed models. However, Bunch objects are not very good for analyzing and manipulating your data.
Step12: You might notice that the integer representation of species got converted to a floating point number along the way. We can change that back.
Step13: Exercise 1
Step14: Fetching
Step15: The dataset is once again a Bunch.
Step16: We see that four of the attributes that we expect are present, but 'target_names' is missing. This is because our target is now a continuous variable (home price) and not a discrete value (iris species).
Step17: Converting a Bunch of regression data to a DataFrame is no different than for a Bunch of classification data.
Step18: Generating
Step19: We can use a visualization library to plot the regression data.
Step20: That data appears to have a very linear pattern!
Step21: There are dozens of dataset loaders and generators in the scikit-learn datasets package. When you want to play with a new machine learning algorithm, they are a great source of data for getting started.
Step22: Exercise 3
Step23: Models
Step24: At this point, don't worry too much about the details of what LinearRegression is doing. There is a deep-dive into regression problems coming up soon.
Step25: We will now create a MinMaxScaler and fit it to our feature data.
Step26: You might notice that the values are stored in arrays. This is because transformers can operate on more than one feature. In this case, however, we have only one.
Step27: Pipelines
Step28: Metrics
Step29: In this case, the MSE value alone doesn't have much meaning. Since the data that we fit the regression to isn't related to any real-world metrics, the MSE is hard to interpret alone.
Step30: The return value of the score method depends on the estimator being used. In the case of LinearRegression, the score is the $R^2$ score, where scores closer to 1.0 are better. You can find the metric that score returns in the documentation for the given estimator you're using.
|
5,476
|
<ASSISTANT_TASK:>
Python Code::
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
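# NOTE (added): X_train is assumed to be defined elsewhere in this snippet.
# For a self-contained run, hypothetical toy data could be generated, e.g.:
# from sklearn.datasets import make_blobs
# X_train, _ = make_blobs(n_samples=500, centers=4, random_state=101)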
# Step 1: Set range of clusters to try and
# create inertia values dictionary
clusters_range = (1,10)
inertia_values = {}
# Step 2: For each set of clusters fit a kmeans algorithm and add
# inertia value to interia values dictionary
for clusters in range(clusters_range[0], clusters_range[1] + 1):
k_means = KMeans(n_clusters=clusters,
random_state=101)
k_means.fit(X_train)
inertia_values[str(clusters)] = k_means.inertia_
# Step 3: Plot inertia values dictionary to assess optimum
# number of clusters to use
plt.figure(figsize=(10,6))
plt.plot(inertia_values.keys(),
inertia_values.values(),
marker='o')
plt.title('KMeans Clustering Elbow Method')
plt.ylabel('Inertia')
plt.xlabel('Clusters')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
5,477
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
# Just from the values
obj = pd.Series([4, 7, -5, 3])
obj
obj.values
obj.index
# From the values and the indices
obj2 = pd.Series([4, 7, -5, 3], index=['d','b','a','c'])
obj2
obj2.index
# From a dictionary
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj3 = pd.Series(sdata)
obj3
# From a dictionary and the indices
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj4 = pd.Series(sdata, index=states)
obj4
obj2['a']
obj2['d'] = 6
obj2['d']
obj2[['c','a','d']]
obj2[obj2 > 0]
# Multiplication by a scalar
obj2 * 2
# numpy vectorized operations
import numpy as np
np.exp(obj2)
# Functions that work like they do with dictionaries
'b' in obj2
'e' in obj2
# Functions to identify missing data
obj4.isnull()
obj4.notnull()
# Arithmetic operations with automatic index alignment
obj3 + obj4
# From a dictionary of arrays
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'], \
'year': [2000, 2001, 2002, 2001, 2002], \
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
frame = pd.DataFrame(data)
frame
# From a dictionary with a specific column order
pd.DataFrame(data, columns=['year', 'state', 'pop'])
# From a dictionary plus the column and/or row indices
frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index=['one', 'two', 'three', 'four', 'five'])
frame2
# From a dictionary of nested dictionaries
pop = {'Nevada': {2001: 2.4, 2002: 2.9}, 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
frame3 = pd.DataFrame(pop)
frame3
# Accessing columns as in a Series or dictionary
frame2['state']
# Accessing columns as an attribute
frame2.year
# Accessing rows by row label
frame2.ix['three']
# Accessing rows by row index
frame2.ix[3]
# Modifying a column with a scalar value
frame2['debt'] = 16.5
frame2
# Modifying a column with an array
frame2['debt'] = np.arange(5.)
frame2
# Modifying a column with a Series
val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
frame2
# Adding a column that doesn't exist yet
frame2['eastern'] = frame2.state == 'Ohio'
frame2
# Deleting a column
del frame2['eastern']
frame2.columns
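# Added aside (not in the original notebook): two more constructor inputs,
# a 2D ndarray and a list of dicts.
pd.DataFrame(np.arange(6).reshape(2, 3), columns=['a', 'b', 'c'])
pd.DataFrame([{'a': 1, 'b': 2}, {'a': 3, 'c': 4}])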
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Series
Step6: Accessing elements of a Series
Step12: Some operations allowed on a Series
Step17: DataFrame
Step27: Note that these are not all the possible ways to do it. For a more complete view, see the following table with the possible inputs to the DataFrame constructor
|
5,478
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.cross_validation import ShuffleSplit
from IPython.display import display
# Pretty display for notebooks
%matplotlib inline
# Load the Boston housing dataset
data = pd.read_csv('housing.csv')
prices = data['MDEV']
print prices.size
features = data.drop('MDEV', axis = 1)
display(data.head())
# Success
print "This housing dataset has {} data points with {} variables each.".format(*data.shape)
# Minimum price of the data
minimum_price = np.min(prices)
# Maximum price of the data
maximum_price = np.max(prices)
# Mean price of the data
mean_price = np.mean(prices)
# Median price of the data
median_price = np.median(prices)
# Standard deviation of prices of the data
std_price = np.std(prices)
# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)
import matplotlib.pyplot as plt
display(features.head())
plt.plot(features.LSTAT, prices, 'o')
plt.title('LSTAT vs PRICES')
plt.xlabel('LSTAT')
plt.ylabel('PRICES')
plt.plot(features.RM, prices, 'o')
plt.title('RM vs PRICES')
plt.xlabel('RM')
plt.ylabel('PRICES')
plt.plot(features.PTRATIO, prices, 'o')
plt.title('PTRATIO vs PRICES')
plt.xlabel('PTRATIO')
plt.ylabel('PRICES')
from sklearn.metrics import r2_score
def performance_metric(y_true, y_predict):
    """Calculates and returns the performance score between
    true and predicted values based on the metric chosen."""
# Calculate the performance score between 'y_true' and 'y_predict'
score = r2_score(y_true, y_predict)
# Return the score
return score
# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)
# Import 'train_test_split'
from sklearn import cross_validation
# Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = (cross_validation.train_test_split(features, prices, test_size=0.2, random_state=0))
# Success
print "Training and testing split was successful."
# Produce learning curves for varying training set sizes and maximum depths
vs.ModelLearning(features, prices)
vs.ModelComplexity(X_train, y_train)
# Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer
from sklearn.grid_search import GridSearchCV
def fit_model(X, y):
    """Performs grid search over the 'max_depth' parameter for a
    decision tree regressor trained on the input data [X, y]."""
# Create cross-validation sets from the training data
cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)
# Create a decision tree regressor object
regressor = DecisionTreeRegressor()
# Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
params = {'max_depth': [1,2,3,4,5,6,7,8,9,10]}
# Transform 'performance_metric' into a scoring function using 'make_scorer'
scoring_fnc = make_scorer(performance_metric)
# Create the grid search object
grid = GridSearchCV(estimator=regressor, param_grid=params, scoring=scoring_fnc, cv=cv_sets)
# Fit the grid search object to the data to compute the optimal model
grid = grid.fit(X, y)
# Return the optimal model after fitting the data
return grid.best_estimator_
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)
# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])
# Produce a matrix for client data
client_data = [[5, 34, 15], # Client 1
[4, 55, 22], # Client 2
[8, 7, 12]] # Client 3
# Show predictions
for i, price in enumerate(reg.predict(client_data)):
print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
import matplotlib.pyplot as plt
plt.hist(prices, bins = 30)
for price in reg.predict(client_data):
plt.axvline(price, c = 'r', lw = 3)
from sklearn.neighbors import NearestNeighbors
num_neighbors=5
def nearest_neighbor_price(x):
def find_nearest_neighbor_indexes(x, X): # x is your vector and X is the data set.
neigh = NearestNeighbors( num_neighbors )
neigh.fit(X)
distance, indexes = neigh.kneighbors( x )
return indexes
indexes = find_nearest_neighbor_indexes(x, features)
sum_prices = []
for i in indexes:
sum_prices.append(prices[i])
neighbor_avg = np.mean(sum_prices)
return neighbor_avg
print nearest_neighbor_price( [4, 55, 22])
index = 0
for i in client_data:
val=nearest_neighbor_price(i)
index += 1
print "The predicted {} nearest neighbors price for home {} is: ${:,.2f}".format(num_neighbors,index, val)
vs.PredictTrials(features, prices, fit_model, client_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Exploration
Step2: Feature Observation
Step4: Developing a Model
Step5: Implementation
Step6: Benefit of splitting the data set into Training and Testing sets
Step7: Learning the Data
Step9: Bias-Variance Tradeoff
Step10: Making Predictions
Step11: Predicting Selling Prices
Step12: Clients 1, 2, and 3 will be recommended to sell their houses at \$324,240.00, \$189,123.53, and \$942,666.67, respectively. Looking at the training data statistics above, the median (~\$439K) and mean (\$454,342) are very close to each other, i.e., house prices are more or less normally distributed. This means 68\% of the houses lie between \$289,171 and \$619,514 and 95\% of the houses lie between \$124,000 and \$784,685. Hence client 3's house is at the right tail of the distribution. Since the training data contains too few samples in this price range, the error bar on this predicted value will be much larger than the error bar on client 1's predicted value. The price of this house should be above the average price. A model with more features than just the 3 listed above could do better for this house. The new features could be the number of bathrooms, lot size, type of flooring, etc.
Step13: Sensitivity
|
5,479
|
<ASSISTANT_TASK:>
Python Code:
import emcee
from dustcurve import model
import seaborn as sns
import numpy as np
from dustcurve import pixclass
import matplotlib.pyplot as plt
import pandas as pd
import warnings
from dustcurve import io
from dustcurve import hputils
from dustcurve import kdist
import h5py
from dustcurve import globalvars as gv
%matplotlib inline
f=h5py.File('/n/fink1/czucker/Output/2degrees_jul1_0.1sig_cropped.h5')
samples=f['/chains']
nsteps=samples.shape[2]
ndim=samples.shape[3]
#Extract the coldest [beta=1] temperature chain from the sampler object; discard first half of samples as burnin
samples_cold = samples[0,:,int(.5*nsteps):,:]
traces_cold = samples_cold.reshape(-1, ndim).T
#find best fit values for each of the 24 parameters (12 d's and 12 c's)
theta=pd.DataFrame(traces_cold)
quantile_50=theta.quantile(.50, axis=1).values
quantile_84=theta.quantile(.84, axis=1).values
quantile_16=theta.quantile(.16, axis=1).values
upperlim=quantile_84-quantile_50
lowerlim=quantile_50-quantile_16
#print out distances
for i in range(0,int(len(quantile_50)/2)):
print('d%i: %.3f + %.3f - %.3f' % (i+1,quantile_50[i],upperlim[i], lowerlim[i]))
#print out coefficients
for i in range(int(len(quantile_50)/2), int(len(quantile_50))):
print('c%i: %.3f + %.3f - %.3f' % (i+1-int(len(quantile_50)/2),quantile_50[i],upperlim[i], lowerlim[i]))
#set up subplots for chain plotting
axes=['ax'+str(i) for i in range(ndim)]
fig, (axes) = plt.subplots(ndim, figsize=(10,60))
plt.tight_layout()
for i in range(0,ndim):
if i<int(ndim/2):
axes[i].set(ylabel='d%i' % (i+1))
else:
        axes[i].set(ylabel='c%i' % (i + 1 - int(ndim/2)))
#plot traces for each parameter
for i in range(0,ndim):
sns.tsplot(traces_cold[i],ax=axes[i])
#set up subplots for histogram plotting
axes=['ax'+str(i) for i in range(ndim)]
fig, (axes) = plt.subplots(ndim, figsize=(10,60))
plt.tight_layout()
for i in range(0,ndim):
if i<int(ndim/2):
axes[i].set(ylabel='d%i' % (i+1))
else:
        axes[i].set(ylabel='c%i' % (i + 1 - int(ndim/2)))
#plot traces for each parameter
for i in range(0,ndim):
sns.distplot(traces_cold[i],ax=axes[i],hist=True,norm_hist=False)
from dustcurve import plot_posterior
ratio=0.06
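# NOTE (added): `post_all` used below is not constructed anywhere in this excerpt.
# A plausible construction matching the text -- one thousand random parameter
# vectors drawn from the last half of the coldest chain -- is sketched here as
# an assumption, not the author's original cell:
draw_idx = np.random.choice(traces_cold.shape[1], size=1000)
post_all = [traces_cold[:, i] for i in draw_idx]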
plot_posterior.plot_samples(np.asarray(post_all),np.linspace(4,19,120),np.linspace(0,7,700),quantile_50,traces_cold,ratio,gv.unique_co,y_range=[0,2],vmax=20,normcol=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's see what our chains look like by producing trace plots
Step2: Now we are going to use the seaborn distplot function to plot histograms of the last half of the traces for each parameter.
Step3: Now we want to see how similar the parameters at different steps are. To do this, we draw one thousand random samples from the last half of the chain and plot the reddening profile corresponding to those parameters in light blue. Then, we plot the "best fit" reddening profile corresponding to the 50th quantile parameters (essentially the median of the last half of the chains). In all cases, we take the average of the CO for all nside 128 pixels at each slice. As you can see, drawing random samples from the last half of the chain produces reddening profiles essentially identical to the best fit values.
|
5,480
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%matplotlib inline
import numpy as np
from sklearn.linear_model import LogisticRegression
df = pd.read_csv("hanford.csv")
df.head()
df.mean()
df.median()
#range
df["Exposure"].max() - df["Exposure"].min()
#range
df["Mortality"].max() - df["Mortality"].min()
df.std()
df.corr()
#IQR
IQR= df['Exposure'].quantile(q=0.75)- df['Exposure'].quantile(q=0.25)
Q1= df['Exposure'].quantile(q=0.25) #1st Quartile
Q1
Q2= df['Exposure'].quantile(q=0.5) #2nd Quartile (Median)
Q3= df['Exposure'].quantile(q=0.75) #3rd Quartile
UAL= (IQR * 1.5) +Q3
UAL
LAL= Q1- (IQR * 1.5)
LAL
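# Added sketch (not in the original notebook): one possible way to finish the
# "recode" step. The threshold choice here (the median exposure) is an
# assumption; the UAL/LAL values above could be used instead to flag outliers.
threshold = df['Exposure'].median()
df['high_exposure'] = (df['Exposure'] > threshold).astype(int)
df[['Exposure', 'high_exposure']]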
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Read in the hanford.csv file in the data/ folder
Step2: 3. Calculate the basic descriptive statistics on the data
Step3: 4. Find a reasonable threshold to say exposure is high and recode the data
Step4: UAL= (IQR * 1.5) +Q3
|
5,481
|
<ASSISTANT_TASK:>
Python Code:
#|all_slow
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
pretrained_weights = 'gpt2'
tokenizer = GPT2TokenizerFast.from_pretrained(pretrained_weights)
model = GPT2LMHeadModel.from_pretrained(pretrained_weights)
ids = tokenizer.encode('This is an example of text, and')
ids
tokenizer.decode(ids)
import torch
t = torch.LongTensor(ids)[None]
preds = model.generate(t)
preds.shape,preds[0]
tokenizer.decode(preds[0].numpy())
from fastai.text.all import *
path = untar_data(URLs.WIKITEXT_TINY)
path.ls()
df_train = pd.read_csv(path/'train.csv', header=None)
df_valid = pd.read_csv(path/'test.csv', header=None)
df_train.head()
all_texts = np.concatenate([df_train[0].values, df_valid[0].values])
class TransformersTokenizer(Transform):
def __init__(self, tokenizer): self.tokenizer = tokenizer
def encodes(self, x):
toks = self.tokenizer.tokenize(x)
return tensor(self.tokenizer.convert_tokens_to_ids(toks))
def decodes(self, x): return TitledStr(self.tokenizer.decode(x.cpu().numpy()))
splits = [range_of(df_train), list(range(len(df_train), len(all_texts)))]
tls = TfmdLists(all_texts, TransformersTokenizer(tokenizer), splits=splits, dl_type=LMDataLoader)
tls.train[0],tls.valid[0]
tls.tfms(tls.train.items[0]).shape, tls.tfms(tls.valid.items[0]).shape
show_at(tls.train, 0)
show_at(tls.valid, 0)
bs,sl = 4,256
dls = tls.dataloaders(bs=bs, seq_len=sl)
dls.show_batch(max_n=2)
def tokenize(text):
toks = tokenizer.tokenize(text)
return tensor(tokenizer.convert_tokens_to_ids(toks))
tokenized = [tokenize(t) for t in progress_bar(all_texts)]
class TransformersTokenizer(Transform):
def __init__(self, tokenizer): self.tokenizer = tokenizer
def encodes(self, x):
return x if isinstance(x, Tensor) else tokenize(x)
def decodes(self, x): return TitledStr(self.tokenizer.decode(x.cpu().numpy()))
tls = TfmdLists(tokenized, TransformersTokenizer(tokenizer), splits=splits, dl_type=LMDataLoader)
dls = tls.dataloaders(bs=bs, seq_len=sl)
dls.show_batch(max_n=2)
class DropOutput(Callback):
def after_pred(self): self.learn.pred = self.pred[0]
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), cbs=[DropOutput], metrics=Perplexity()).to_fp16()
learn.validate()
learn.lr_find()
learn.fit_one_cycle(1, 1e-4)
df_valid.head(1)
prompt = "\n = Unicorn = \n \n A unicorn is a magical creature with a rainbow tail and a horn"
prompt_ids = tokenizer.encode(prompt)
inp = tensor(prompt_ids)[None].cuda()
inp.shape
preds = learn.model.generate(inp, max_length=40, num_beams=5, temperature=1.5)
tokenizer.decode(preds[0].cpu().numpy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this tutorial, we will see how we can use the fastai library to fine-tune a pretrained transformer model from the transformers library by HuggingFace. We will use the mid-level API to gather the data. Even if this tutorial is self contained, it might help to check the imagenette tutorial to have a second look on the mid-level API (with a gentle introduction using the higher level APIs) in computer vision.
Step2: We can use several versions of this GPT2 model, look at the transformers documentation for more details. Here we will use the basic version (that already takes a lot of space in memory!) You can change the model used by changing the content of pretrained_weights (if it's not a GPT2 model, you'll need to change the classes used for the model and the tokenizer of course).
Step3: Before we move on to the fine-tuning part, let's have a look at this tokenizer and this model. The tokenizers in HuggingFace usually do the tokenization and the numericalization in one step (we ignore the padding warning for now)
Step4: Like fastai Transforms, the tokenizer has a decode method to give you back a text from ids
Step5: The model can be used to generate predictions (it is pretrained). It has a generate method that expects a batch of prompt, so we feed it our ids and add one batch dimension (there is a padding warning we can ignore as well)
Step6: The predictions, by default, are of length 20
Step7: We can use the decode method (that prefers a numpy array to a tensor)
Step8: Bridging the gap with fastai
Step9: Preparing the data
Step10: Let's have a look at what those csv files look like
Step11: We gather all texts in one numpy array (since it will be easier to use this way with fastai)
Step12: To process this data to train a model, we need to build a Transform that will be applied lazily. In this case we could do the pre-processing once and for all and only use the transform for decoding (we will see how just after), but the fast tokenizer from HuggingFace is, as its name indicates, fast, so it doesn't really impact performance to do it this way.
Step13: Two comments on the code above
Step14: We specify dl_type=LMDataLoader for when we will convert this TfmdLists to DataLoaders
Step15: They look the same but only because they begin and end the same way. We can see the shapes are different
Step16: And we can have a look at both decodes using show_at
Step17: The fastai library expects the data to be assembled in a DataLoaders object (something that has a training and validation dataloader). We can get one by using the dataloaders method. We just have to specify a batch size and a sequence length. We'll train with sequences of size 256 (GPT2 used sequence length 1024, but not everyone has enough GPU RAM for that)
Step18: Note that you may have to reduce the batch size depending on your GPU RAM.
Step19: Another way to gather the data is to preprocess the texts once and for all and only use the transform to decode the tensors to texts
Step20: Now we change the previous Tokenizer like this
Step21: In the `encodes` method, we still account for the case where we get something that's not already tokenized, just in case we were to build a dataset with new texts using this transform.
Step22: And we can check it still works properly for showing purposes
Step23: Fine-tuning the model
Step24: Of course we could make this a bit more complex and add some penalty to the loss using the other part of the tuple of predictions, like the RNNRegularizer.
Step25: We can check how good the model is without any fine-tuning step (spoiler alert, it's pretty good!)
Step26: This lists the validation loss and metrics (so 26.6 as perplexity is kind of amazing).
Step27: The learning rate finder curve suggests picking something between 1e-4 and 1e-3.
Step28: Now with just one epoch of fine-tuning and not much regularization, our model did not really improve since it was already amazing. To have a look at some generated texts, let's take a prompt that looks like a wikipedia article
Step29: Article seems to begin with new line and the title between = signs, so we will mimic that
Step30: The prompt needs to be tokenized and numericalized, so we use the same function as before to do this, before we use the generate method of the model.
|
5,482
|
<ASSISTANT_TASK:>
Python Code:
import time
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.cross_validation import cross_val_score
#from sklearn.model_selection import StratifiedShuffleSplit
#from sklearn.model_selection import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.utils import shuffle
from scipy import interp
import scipy.stats as stats
import pickle
# My libraries
import data.preprocessing as preproc
import project_fxns.rt_window_prediction as rtwin
%matplotlib inline
%load_ext autoreload
%autoreload 2
toy = pd.DataFrame([[1,2,3,0],
[0,0,0,0],
[0.5,1,0,0]], dtype=float,
columns=['1', '2', '3', '4'],
index=['sample_%s' % i for i in range(1,4)])
preproc.prevalence_threshold(toy)
### Subdivide the data into a feature table
local_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/'\
'projects'
data_path = local_path + '/revo_healthcare/data/processed/MTBLS72/positive_mode/'\
'mtbls_no_retcor_bw2.csv'
## Import the data and remove extraneous columns
df = pd.read_csv(data_path, index_col=0)
# Replace X's at the beginning of sample names
new_idx = [i.replace('X', '') for i in df.columns]
df.columns = new_idx
print df.columns
df.shape
df.head()
# Make a new index of mz:rt
mz = df.loc[:,"mz"].astype('str')
rt = df.loc[:,"rt"].astype('str')
idx = mz+':'+rt
df.index = idx
# separate samples from xcms/camera things to make feature table
not_samples = ['mz', 'mzmin', 'mzmax', 'rt', 'rtmin', 'rtmax',
'npeaks', 'positive_mode',
]
samples_list = df.columns.difference(not_samples)
mz_rt_df = df[not_samples]
# Convert to (samples x features) format - our standard
X_df_raw = df[samples_list].T
new_idx = [i.replace('X', '') for i in X_df_raw.index]
X_df_raw.index = new_idx
# get mapping between sample name and sample class
path_sample_class_map = (local_path +
'/revo_healthcare/data/raw/MTBLS72/s_Plasma_AD_Lipidomics.txt')
class_column = 'Factor Value[Cognitive Status]'
class_df = pd.read_csv(path_sample_class_map,
sep='\t')
# Set index as sample name
class_df.set_index('Sample Name', inplace=True)
class_df = class_df[class_column]
# select only positive values from positive-ion mode
class_df = class_df[class_df.index.str.contains('POS')]
print class_df.head(10)
print "Class label shape: ", class_df.shape
print "feature table shape: ", X_df_raw.shape
#class_df.rename('class', inplace=True)
print class_df.head()
print '\n\nClass labels: ', pd.unique(class_df.values)
# Get case and control dataframes
case_labels = class_df[class_df=='aMCI/AD'].index
control_labels = class_df[class_df == 'Normal Control'].index
case = X_df_raw.loc[case_labels]
control = X_df_raw.loc[control_labels]
# Match between feature table and metadata and assert that they're in the same order.
# then define the numpy-arrays for X and y
class_df = class_df[X_df_raw.index].sort_index()
X_df_raw = X_df_raw.sort_index()
print 'Class values:', class_df.unique()
assert (class_df.index == X_df_raw.index).all()
# convert classes to numbers
le = preprocessing.LabelEncoder()
le.fit(class_df)
y = le.transform(class_df)
print y
print "class-labels: ", le.classes_
# fill nan values with 1/2 the minimum from each sample
fill_val = X_df_raw.min(axis=1) / 2.0
# Only keep features present in at least 70% of samples
# Note that this means 70% total, not 70% in a particular class
X_df_filtered = preproc.prevalence_threshold(X_df_raw,
threshold=0.7)
print 'Raw shape', X_df_raw.shape
print 'Filtered at 70% prevalence', X_df_filtered.shape
# must transpose, b/c fillna only operates along columns
X_df_filled = X_df_filtered.fillna(value=fill_val, )
#X_pqn_df_raw = preproc.correct_dilution_factor(X_df_raw, plot=True)
X_pqn_df_filled = preproc.correct_dilution_factor(X_df_filled, plot=True)
X_pqn_df_filled_log = np.log10(X_pqn_df_filled)
# Do mann-whitney on case vs control
def mw_pval_dist(case, control):
'''
case - dataframe containing case
control - dataframe with control samples
All should have same features (columns)
'''
# get parametric pvals
mann_whitney_vals = pd.DataFrame(np.full([case.shape[1],2], np.nan),
index=case.columns, columns= ['u', 'pval'])
for idx, case_vals in case.iteritems():
control_vals = control[idx]
u, pval = stats.mannwhitneyu(case_vals, control_vals)
mann_whitney_vals.loc[idx, 'u'] = u
mann_whitney_vals.loc[idx, 'pval'] = pval
# plot mw pval distribution
mann_whitney_vals.hist('pval')
plt.title('mann-whitney pval between case and control')
plt.show()
# plot distribution of mean intensities
case_mean = case.mean(axis=0)
ctrl_mean = control.mean(axis=0)
sns.distplot(np.log10(case_mean), label='case')
sns.distplot(np.log10(ctrl_mean), label='control')
plt.xlabel('log_10 intensity')
plt.title('Mean intensity of case vs. control')
plt.legend()
plt.show()
u, pval = stats.mannwhitneyu(case_mean, ctrl_mean)
print 'pval (MannW) of intensities between case and control: ', pval
#print('Raw intensities\n\n')
#mw_pval_dist(X_df_raw.loc[case_labels], X_df_raw.loc[control_labels])
print('*'*50+'\nNaN filled with 1/2 min')
mw_pval_dist(X_df_filled.loc[case_labels], X_df_filled.loc[control_labels])
#print('*'*50+'\n Raw pqn_normalized')
#mw_pval_dist(X_pqn_df_raw.loc[case_labels], X_pqn_df_raw.loc[control_labels])
print('*'*50+'\n NaN filled with 1/2 min, pqn normalized')
mw_pval_dist(X_pqn_df_filled.loc[case_labels], X_pqn_df_filled.loc[control_labels])
print('*'*50+'\n NaN filled with 1/2 min, pqn normalized, log10 transformed')
mw_pval_dist(X_pqn_df_filled_log.loc[case_labels],
X_pqn_df_filled_log.loc[control_labels])
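# Added aside (an assumption, not part of the original analysis): with this many
# features, a multiple-testing correction helps judge whether the excess of small
# p-values survives. Benjamini-Hochberg via statsmodels is one option.
from statsmodels.stats.multitest import multipletests
pvals = [stats.mannwhitneyu(X_pqn_df_filled_log.loc[case_labels, col],
                            X_pqn_df_filled_log.loc[control_labels, col])[1]
         for col in X_pqn_df_filled_log.columns]
reject, pvals_bh, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
reject.sum() # number of features still significant at 5% FDR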
# Add back mz, rt, etc. columns to feature table and reshape it to be
# (feats x samples)
X_pqn_df_filled_mzrt = pd.concat([df[not_samples].T, X_pqn_df_filled],
axis=0).T
# run a sliding windonw
# Make sliding window
min_val = 0
max_val = df['rt'].max()
width = max_val / 5
step = width / 2
sliding_window = rtwin.make_sliding_window(min_val,
max_val, width, step)
# Paths to plot things
path = ('/revo_healthcare/presentations/isaac_bats/'+
'rt_window_plots/MTBLS72/')
output_path = local_path + path
# run classifier & plot on sliding window
n_iter = 50
test_size = 0.3
rf_trees = 1000
# Run rt-sliding-window classifier
rtwin.sliding_rt_window_aucs(X_pqn_df_filled_mzrt, y, sliding_window, not_samples,
rf_trees=rf_trees, n_iter=n_iter, test_size=test_size,
output_path=output_path)
auc_vals = pickle.load(open(output_path+'/auc_vals.pkl', 'rb'))
fig_path = output_path + 'auc_vs_rt.pdf'
rtwin.plot_auc_vs_rt(auc_vals, sliding_window, df, fig_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Get the xcms data </h2>
Step2: <h2> Get mappings between sample names, file names, and sample classes </h2>
Step3: <h2> Convert class labels to binary </h2>
Step4: <h2> Preprocess feature table </h2>
Step5: <h2> There is a bias in intensity values between case and control. Can I eliminate that through some normalization? </h2>
Step6: <h2> It seems really weird to me that almost half of all features have p-values less than 0.05 </h2>
|
5,483
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
features_dataframe = load_data()
n = features_dataframe.shape[0]
train_size = 0.8
test_size = 1 - train_size + 0.005
train_dataframe = features_dataframe.iloc[int(n * test_size):]
test_dataframe = features_dataframe.iloc[:int(n * test_size)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
5,484
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
# Alex Rockhill <aprockhill@mailbox.org>
#
# License: BSD-3-Clause
from mne.io.fiff.raw import read_raw_fif
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa: F401
from mne.viz import (plot_alignment, snapshot_brain_montage, set_3d_view)
misc_path = mne.datasets.misc.data_path()
ecog_data_fname = op.join(misc_path, 'ecog', 'sample_ecog_ieeg.fif')
subjects_dir = op.join(misc_path, 'ecog')
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
raw = read_raw_fif(ecog_data_fname)
raw.pick_channels([f'G{i}' for i in range(1, 257)]) # pick just one grid
# Since we loaded in the ecog data from FIF, the coordinates
# are in 'head' space, but we actually want them in 'mri' space.
# So we will apply the head->mri transform that was used when
# generating the dataset (the estimated head->mri transform).
montage = raw.get_montage()
trans = mne.coreg.estimate_head_mri_t('sample_ecog', subjects_dir)
montage.apply_trans(trans)
fig = plot_alignment(raw.info, trans=trans, subject='sample_ecog',
subjects_dir=subjects_dir, surfaces=dict(pial=0.9))
set_3d_view(figure=fig, azimuth=20, elevation=80)
xy, im = snapshot_brain_montage(fig, raw.info)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in raw.ch_names])
# Compute beta power to visualize
raw.load_data()
beta_power = raw.filter(20, 30).apply_hilbert(envelope=True).get_data()
beta_power = beta_power.max(axis=1) # take maximum over time
# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
cmap = ax.scatter(*xy_pts.T, c=beta_power, s=100, cmap='coolwarm')
cbar = fig2.colorbar(cmap)
cbar.ax.set_ylabel('Beta Power')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name)) # save if we want
# # We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=80, color='r')
fig.tight_layout()
ax.set_axis_off()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Project 3D electrodes to a 2D snapshot
Step3: Manually creating 2D electrode positions
|
5,485
|
<ASSISTANT_TASK:>
Python Code:
!pip install -r requirements.txt
import pandas as pd
import numpy as np
df=pd.read_csv('talks.csv')
df.head()
year_labeled=
year_predict=
description_labeled = df[df.year==year_labeled]['description']
description_predict = df[df.year==year_predict]['description']
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words="english")
vectorized_text_labeled = vectorizer.fit_transform( ... )
occurrences = np.asarray(vectorized_text_labeled.sum(axis=0)).ravel()
terms = ( ... )
counts_df = pd.DataFrame({'terms': terms, 'occurrences': occurrences}).sort_values('occurrences', ascending=False)
counts_df
vectorized_text_predict = vectorizer.transform( ... )
vectorized_text_predict.toarray()
from sklearn.model_selection import train_test_split
labels = df[df.year == 2017]['label']
test_size= ...
X_train, X_test, y_train, y_test = train_test_split(vectorized_text_labeled, labels, test_size=test_size, random_state=1)
import sklearn
from sklearn.svm import LinearSVC
classifier = LinearSVC(verbose=1)
classifier.fit(X_train, y_train)
y_pred = classifier.predict( ... )
report = sklearn.metrics.classification_report( ... , ... )
print(report)
predicted_talks_vector = classifier.predict( ... )
df_2018 = df[df.year==2018]
predicted_talk_indexes = predicted_talks_vector.nonzero()[0] + len(df[df.year==2017])
df_2018_talks = df_2018.loc[predicted_talk_indexes]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise A
Step2: Here is a brief description of the interesting fields.
Step3: Quick Introduction to Text Analysis
Step4: Extra Credit
Step5: Exercise 2.2 Inspect the vocabulary
Step6: Exercise 2.3 Transform documents for prediction into document-term matrix
Step7: Exercise 3
Step8: Exercise 3.1 Inspect the shape of each output of train_test_split
Step9: Exercise 5
Step10: Exercise 6
Step11: Using the predicted_talk_indexes get the talk id, description, presenters, title and location and talk date.
|
5,486
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (9,6)
df = pd.read_csv("../data/creditRisk.csv")
df.head()
from plotnine import *
ggplot(df, aes(x = "Income", y = "Credit History", color = "Risk")) + geom_point(size = 4)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
df['Credit History'].unique()
df['Credit History'].unique()
le.fit(df['Credit History'].unique())
df['Credit History'].tail()
# Converting the categorical data using label encoder
df['Credit History'] = le.transform(df['Credit History'])
df['Credit History'].tail()
le.classes_
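# Added aside (not in the original notebook): the encoded integers can be mapped
# back to the original category labels with inverse_transform.
le.inverse_transform(df['Credit History'].unique())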
df.Risk.unique()
Risk_mapping = {
'High': 2,
'Moderate': 1,
'Low': 0}
df.Risk.tail()
df['Risk'] = df['Risk'].map(Risk_mapping)
df.Risk.tail()
df.head()
data = df.iloc[:,0:2]
target = df['Risk']
from sklearn.linear_model import LogisticRegression
clf_LR = LogisticRegression()
clf_LR
clf_LR = clf_LR.fit(data, target)
from modelvis import plot_classifier_2d
X = np.array(data)
y = np.array(target)
plot_classifier_2d(clf_LR, data,target, probability = False)
data = df.iloc[:,0:2]
target = df.iloc[:,2:3]
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf
clf = clf.fit(data, target)
import pydotplus
from IPython.display import Image
data.columns
target.columns
dot_data = tree.export_graphviz(clf, out_file='tree.dot', feature_names=data.columns,
class_names=['Low', 'Moderate', 'High'], filled=True,
rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_file('tree.dot')
Image(graph.create_png())
def plot_classifier_2d(clf, data, target):
x_min, x_max = data.iloc[:,0].min(), data.iloc[:,0].max()
y_min, y_max = data.iloc[:,1].min(), data.iloc[:,1].max()
xx, yy = np.meshgrid(
np.arange(x_min, x_max, (x_max - x_min)/100),
np.arange(y_min, y_max, (y_max - y_min)/100))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap="viridis", alpha = 0.5)
plt.colorbar(cs)
plt.scatter(x = data.iloc[:,0], y = data.iloc[:,1], c = target, s = 100, cmap="magma")
plot_classifier_2d(clf, data,target)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting the Data
Step2: Preparing Data
Step3: Let's use a dictionary for encoding the nominal variable (a small encoding sketch appears after these steps)
Step4: Classifier - Logistic Regression
Step5: Classifier
Step6: Visualise the Tree
Step7: Understanding how the Decision Tree works
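A small encoding sketch (toy values, not part of the original notebook) showing the two approaches used above, LabelEncoder for an automatic mapping and a plain dict with .map for a hand-picked ordering; it reuses the pandas and preprocessing imports from the code:
toy = pd.Series(['Bad', 'Good', 'Unknown', 'Good'])  # made-up categories
le_toy = preprocessing.LabelEncoder()
print(le_toy.fit_transform(toy))  # [0 1 2 1], codes assigned alphabetically
print(toy.map({'Bad': 0, 'Unknown': 1, 'Good': 2}).tolist())  # [0, 2, 1, 2], order chosen by hand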
|
5,487
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# TODO: Build the vgg network here
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# TODO: Get the values from the relu6 layer of the VGG network
codes_batch = sess.run(vgg.relu6, feed_dict={input_ : images})
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
codes[0:3]
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)  # Your one-hot encoded labels array here
from sklearn.model_selection import StratifiedShuffleSplit
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
train_idx, val_idx = next(ss.split(codes, labels))
half_val_len = int(len(val_idx)/2)
val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
# TODO: Classifier layers and operations
fc1 = tf.layers.dense(inputs=inputs_, units=128, activation=tf.nn.relu)
logits = tf.layers.dense(inputs=fc1, units=5, activation=None)  # output layer logits
entropy = tf.losses.softmax_cross_entropy(onehot_labels=labels_, logits=logits)# cross entropy loss
cost = tf.reduce_mean(entropy)
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost) # training optimizer
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def get_batches(x, y, n_batches=10):
Return a generator that yields batches from arrays x and y.
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
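# Illustrative note (not in the original notebook): each yielded (X, Y) pair is a contiguous
# slice of roughly len(x)//n_batches rows of the codes/labels arrays, and the final batch
# also picks up any leftover rows.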
saver = tf.train.Saver()
with tf.Session() as sess:
epochs = 10
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in get_batches(train_x, train_y):
batch_cost, _, batch_acc = sess.run([cost, optimizer, accuracy], feed_dict={inputs_: x,
labels_: y})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost),
"Accuracy: {:.4f}".format(batch_acc))
# TODO: Your training code here
saver.save(sess, "checkpoints/flowers.ckpt")
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Flower power
Step2: ConvNet Codes
Step3: Below I'm running images through the VGG network in batches.
Step4: Building the Classifier
Step5: Data prep
Step6: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn (a toy example appears after these steps).
Step7: If you did it right, you should see these sizes for the training sets
Step9: Batches!
Step10: Training
Step11: Testing
Step12: Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
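A toy StratifiedShuffleSplit example (made-up data, not part of the original notebook) showing the class-preserving behaviour described in Step6; it reuses the numpy and StratifiedShuffleSplit imports from the code:
X_toy = np.arange(20).reshape(10, 2)  # 10 made-up samples
y_toy = np.array([0]*8 + [1]*2)  # imbalanced labels, 80% / 20%
ss_toy = StratifiedShuffleSplit(n_splits=1, test_size=0.5)
tr_idx, te_idx = next(ss_toy.split(X_toy, y_toy))
print(np.bincount(y_toy[tr_idx]), np.bincount(y_toy[te_idx]))  # both halves keep the 4:1 ratio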
|
5,488
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
5,489
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(123)
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (10, 6)
plt.set_cmap("viridis")
from skopt.benchmarks import branin as _branin
def branin(x, noise_level=0.):
return _branin(x) + noise_level * np.random.randn()
from matplotlib.colors import LogNorm
def plot_branin():
fig, ax = plt.subplots()
x1_values = np.linspace(-5, 10, 100)
x2_values = np.linspace(0, 15, 100)
x_ax, y_ax = np.meshgrid(x1_values, x2_values)
vals = np.c_[x_ax.ravel(), y_ax.ravel()]
fx = np.reshape([branin(val) for val in vals], (100, 100))
cm = ax.pcolormesh(x_ax, y_ax, fx,
norm=LogNorm(vmin=fx.min(),
vmax=fx.max()))
minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])
ax.plot(minima[:, 0], minima[:, 1], "r.", markersize=14, lw=0, label="Minima")
cb = fig.colorbar(cm)
cb.set_label("f(x)")
ax.legend(loc="best", numpoints=1)
ax.set_xlabel("X1")
ax.set_xlim([-5, 10])
ax.set_ylabel("X2")
ax.set_ylim([0, 15])
plot_branin()
from functools import partial
from skopt import gp_minimize, forest_minimize, dummy_minimize
func = partial(branin, noise_level=2.0)
bounds = [(-5.0, 10.0), (0.0, 15.0)]
x0 = [2.5, 7.5]
n_calls = 80
def run(minimizer, n_iter=20):
return [minimizer(func, bounds, x0=x0, n_calls=n_calls, random_state=n)
for n in range(n_iter)]
# Random search
dummy_res = run(dummy_minimize)
# Gaussian processes
gp_res = run(gp_minimize)
# Random forest
rf_res = run(partial(forest_minimize, base_estimator="rf"))
# Extra trees
et_res = run(partial(forest_minimize, base_estimator="et"))
from skopt.plots import plot_convergence
plot_convergence(("dummy_minimize", dummy_res),
("gp_minimize", gp_res),
("forest_minimize('rf')", rf_res),
("forest_minimize('et)", et_res),
true_minimum=0.397887, yscale="log")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bayesian optimization or sequential model-based optimization uses a surrogate model to approximate the expensive objective function and to decide where to evaluate it next.
Step2: This shows the value of the two-dimensional branin function and the three minima.
Step3: Note that this can take a few minutes.
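Once the runs above finish, each entry of dummy_res, gp_res, rf_res and et_res is an OptimizeResult-style object; a quick sketch (not part of the original notebook) of pulling out the best point a single run found:
best = gp_res[0]  # first of the repeated gp_minimize runs
print(best.x, best.fun)  # best (x1, x2) found and its (noisy) objective value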
|
5,490
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
np.max(chars)+1
def split_data(chars, batch_size, num_steps, split_frac=0.9):
Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
batch_size: Number of examples in each batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
# Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 50)
train_x.shape
train_x[:,:50]
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# One-hot encoding the input and target characters
x_one_hot = tf.one_hot(inputs, num_classes)
y_one_hot = tf.one_hot(targets, num_classes)
### Build the RNN layers
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
### Run the data through the RNN layers
# Run each sequence step through the RNN and collect the outputs
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one output row for each step for each batch
seq_output = tf.concat(outputs, axis=1)
output = tf.reshape(seq_output, [-1, lstm_size])
# Now connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(num_classes))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and batch
logits = tf.matmul(output, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
preds = tf.nn.softmax(logits, name='predictions')
# Reshape the targets to match the logits
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
cost = tf.reduce_mean(loss)
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
# NOTE: I'm using a namedtuple here because I think they are cool
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
batch_size = 10
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
keep_prob = 0.5
epochs = 20
# Save every N iterations
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/i{}_l{}_v{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
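# Illustrative note (not in the original notebook): pick_top_n zeroes out all but the top_n
# largest probabilities, renormalizes what is left, and samples an index from that reduced
# set; e.g. pick_top_n(np.array([[0.5, 0.3, 0.1, 0.06, 0.04]]), 5, top_n=2) can only ever
# return 0 or 1.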
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/____.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step6: Making training and validation batches
Step7: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step8: Looking at the size of this array, we see that we have rows equal to the batch size. When we want to get a batch out of here, we can grab a subset of this array that contains all the rows but has a width equal to the number of steps in the sequence. The first batch looks like this
Step9: I'll write another function to grab batches out of the arrays made by split_data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window over to the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch (a toy illustration of these windows appears after these steps).
Step10: Building the model
Step11: Hyperparameters
Step12: Training
Step13: Saved checkpoints
Step14: Sampling
Step15: Here, pass in the path to a checkpoint and sample from the network.
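A toy illustration (made-up numbers, not part of the original notebook) of the sliding windows described in Step9, using the get_batch function defined above:
toy = np.arange(24).reshape(2, 12)  # batch_size=2 rows, 12 steps per row
windows = [w[0] for w in get_batch([toy], num_steps=4)]  # three (2, 4) windows, left to right
print(windows[0])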
|
5,491
|
<ASSISTANT_TASK:>
Python Code:
import sys
import random
import numpy as np
import heapq
import json
import time
BIG_PRIME = 9223372036854775783
def random_parameter():
return random.randrange(0, BIG_PRIME - 1)
class Sketch:
def __init__(self, delta, epsilon, k):
Setup a new count-min sketch with parameters delta, epsilon and k
The parameters delta and epsilon control the accuracy of the
estimates of the sketch
Cormode and Muthukrishnan prove that for an item i with count a_i, the
estimate from the sketch a_i_hat will satisfy the relation
a_hat_i <= a_i + epsilon * ||a||_1
with probability at least 1 - delta, where a is the vector of all
counts and ||x||_1 is the L1 norm of a vector x
Parameters
----------
delta : float
A value in the unit interval that sets the precision of the sketch
epsilon : float
A value in the unit interval that sets the precision of the sketch
k : int
A positive integer that sets the number of top items counted
Examples
--------
>>> s = Sketch(10**-7, 0.005, 40)
Raises
------
ValueError
If delta or epsilon are not in the unit interval, or if k is
not a positive integer
if delta <= 0 or delta >= 1:
raise ValueError("delta must be between 0 and 1, exclusive")
if epsilon <= 0 or epsilon >= 1:
raise ValueError("epsilon must be between 0 and 1, exclusive")
if k < 1:
raise ValueError("k must be a positive integer")
self.w = int(np.ceil(np.exp(1) / epsilon))
self.d = int(np.ceil(np.log(1 / delta)))
self.k = k
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.count = np.zeros((self.d, self.w), dtype='int32')
self.heap, self.top_k = [], {} # top_k => [estimate, key] pairs
def update(self, key, increment):
Updates the sketch for the item with name of key by the amount
specified in increment
Parameters
----------
key : string
The item to update the value of in the sketch
increment : integer
The amount to update the sketch by for the given key
Examples
--------
>>> s = Sketch(10**-7, 0.005, 40)
>>> s.update('http://www.cnn.com/', 1)
for row, hash_function in enumerate(self.hash_functions):
column = hash_function(abs(hash(key)))
self.count[row, column] += increment
self.update_heap(key)
def update_heap(self, key):
Updates the class's heap that keeps track of the top k items for a
given key
For the given key, it checks whether the key is present in the heap,
updating accordingly if so, and adding it to the heap if it is
absent
Parameters
----------
key : string
The item to check against the heap
estimate = self.get(key)
if not self.heap or estimate >= self.heap[0][0]:
if key in self.top_k:
old_pair = self.top_k.get(key)
old_pair[0] = estimate
heapq.heapify(self.heap)
else:
if len(self.top_k) < self.k:
heapq.heappush(self.heap, [estimate, key])
self.top_k[key] = [estimate, key]
else:
new_pair = [estimate, key]
old_pair = heapq.heappushpop(self.heap, new_pair)
if new_pair[1] != old_pair[1]:
del self.top_k[old_pair[1]]
self.top_k[key] = new_pair
self.top_k[key] = new_pair
def get(self, key):
Fetches the sketch estimate for the given key
Parameters
----------
key : string
The item to produce an estimate for
Returns
-------
estimate : int
The best estimate of the count for the given key based on the
sketch
Examples
--------
>>> s = Sketch(10**-7, 0.005, 40)
>>> s.update('http://www.cnn.com/', 1)
>>> s.get('http://www.cnn.com/')
1
value = sys.maxint
for row, hash_function in enumerate(self.hash_functions):
column = hash_function(abs(hash(key)))
value = min(self.count[row, column], value)
return value
def __generate_hash_function(self):
Returns a hash function from a family of pairwise-independent hash
functions
a, b = random_parameter(), random_parameter()
return lambda x: (a * x + b) % BIG_PRIME % self.w
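# Note (added for clarity, not in the original): each generated function has the form
# h(x) = ((a*x + b) mod p) mod w with p = BIG_PRIME and random a and b, the standard
# construction of a pairwise-independent hash family; each of the d rows of the count
# table gets its own independent (a, b) pair.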
# define a function to return a list of the exact top users, sorted by count
def exact_top_users(f, top_n = 10):
import operator
counts = {}
for user in f:
user = user.rstrip('\n')
try:
if user not in counts:
counts[user] = 1
else:
counts[user] += 1
except ValueError:
pass
except KeyError:
pass
counter = 0
results = []
for user,count in reversed(sorted(counts.iteritems(), key=operator.itemgetter(1))):
if counter >= top_n:
break
results.append('{},{}'.format(user,str(count)))
counter += 1
return results
# note that the output format is '[user] [count]'
f = open('CM_small.txt')
results_exact = sorted(exact_top_users(f))
print(results_exact)
# define a function to return a list of the estimated top users, sorted by count
def CM_top_users(f, s, top_n = 10):
for user_name in f:
s.update(user_name.rstrip('\n'),1)
results = []
counter = 0
for value in reversed(sorted(s.top_k.values())):
if counter >= top_n:
break
results.append('{1},{0}'.format(str(value[0]),str(value[1])))
counter += 1
return results
# note that the output format is '[user] [count]'
# instantiate a Sketch object
s = Sketch(10**-3, 0.1, 10)
f = open('CM_small.txt')
results_CM = sorted(CM_top_users(f,s))
print(results_CM)
for item in zip(results_exact,results_CM):
print(item)
s = Sketch(0.9, 0.9, 10)
f = open('CM_small.txt')
results_coarse_CM = CM_top_users(f,s)
print(results_coarse_CM)
f = open('CM_large.txt')
%time results_exact = exact_top_users(f)
print(results_exact)
# this could take a few minutes
f = open('CM_large.txt')
s = Sketch(10**-4, 0.001, 10)
%time results_CM = CM_top_users(f,s)
print(results_CM)
for item in zip(results_exact,results_CM):
print(item)
# the CM sketch gets the top entry (an outlier) correct but doesn't do well estimating the order of the more degenerate counts
# let's decrease the precision via both the epsilon and delta parameters, and see whether it still gets the "heavy-hitter" correct
f = open('CM_large.txt')
s = Sketch(10**-3, 0.01, 10)
%time results_CM = CM_top_users(f,s)
print(results_CM)
# nope...sketch is too coarse, too many collisions, and the prominence of user 'Euph0r1a__ 129' is obscured
for item in zip(results_exact,results_CM):
print(item)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Basic Idea of Count Min sketch
Step6: Is it possible to make the sketch so coarse that its estimates are wrong even for this data set?
Step7: Yes! (if you try hard enough) Why? See the arithmetic sketch after these steps.
Step8: For this precision and dataset size, the CM algo takes much longer than the exact solution. In fact, the crossover point at which the CM sketch can achieve reasonable accuracy in the same time as the exact solution is a very large number of entries.
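A quick arithmetic sketch (added for clarity, not part of the original notebook) of why the coarse Sketch(0.9, 0.9, 10) used above collides so badly, plugging those parameters into the width/depth formulas from the class definition:
w_coarse = int(np.ceil(np.exp(1) / 0.9))  # epsilon = 0.9 -> 4 columns
d_coarse = int(np.ceil(np.log(1 / 0.9)))  # delta = 0.9 -> 1 row
print(w_coarse, d_coarse)  # a 1 x 4 count table, so every user hashes into one of only 4 counters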
|
5,492
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccma', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
5,493
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import pandas as pd
import numpy as np
# Turn off notebook package warnings
import warnings
warnings.filterwarnings('ignore')
# print graphs in the document
%matplotlib inline
import seaborn as sns
import statsmodels.formula.api as sm #Import Package
model = sm.ols(formula = 'lifeExp ~ year', data = gapminder).fit() #Fit OLS Model
results = model.summary() #Get Results
print(results) # Print
#Hint: Use this Code in Your Function.
#You will need to replace data = gapminder, with the data subset for a specific country.
# write your function here
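# A minimal sketch of one possible implementation (assumes the `gapminder`
# DataFrame with 'country', 'year' and 'lifeExp' columns is already loaded;
# the function and column names here are assumptions, not a prescribed answer):
def life_expectancy(country, model=False):
    # subset the data to the requested country
    subset = gapminder[gapminder['country'] == country]
    # scatter plot of life expectancy over time for that country
    subset.plot.scatter(x='year', y='lifeExp', title=country)
    if model:
        # fit and print an OLS model on the country subset only
        fit = sm.ols(formula='lifeExp ~ year', data=subset).fit()
        print(fit.summary())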
# Result for a Country (No Model)
life_expectancy("Afghanistan")
# Result for a Country (Model = True)
life_expectancy("New Zealand", True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data with Pandas
Step2: Generate the same graph as above, but this time log-transform the population variable
Step3: Example Results for the Function
|
5,494
|
<ASSISTANT_TASK:>
Python Code:
# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', '..', 'notebook_format'))
from formats import load_style
load_style(css_style='custom2.css', plot_style=False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import os
import math
import time
import spacy
import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
from typing import List
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
%watermark -a 'Ethen' -d -t -v -p numpy,torch,torchtext,spacy
SEED = 2222
random.seed(SEED)
torch.manual_seed(SEED)
# !python -m spacy download de
# !python -m spacy download en
# the link below contains explanation of how spacy's tokenization works
# https://spacy.io/usage/spacy-101#annotations-token
spacy_de = spacy.load('de_core_news_sm')
spacy_en = spacy.load('en_core_web_sm')
def tokenize_de(text: str) -> List[str]:
return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text: str) -> List[str]:
return [tok.text for tok in spacy_en.tokenizer(text)]
text = "I don't like apple."
tokenize_en(text)
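# Quick sanity check that the German tokenizer also reverses the token order,
# as done in the original seq2seq paper (the sentence is an arbitrary example):
tokenize_de("Ich mag Äpfel nicht.")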
source = Field(tokenize=tokenize_de, init_token='<sos>', eos_token='<eos>', lower=True)
target = Field(tokenize=tokenize_en, init_token='<sos>', eos_token='<eos>', lower=True)
train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(source, target))
print(f"Number of training examples: {len(train_data.examples)}")
print(f"Number of validation examples: {len(valid_data.examples)}")
print(f"Number of testing examples: {len(test_data.examples)}")
# equivalent, albeit more verbose: train_data.examples[0].src
train_data[0].src
train_data[0].trg
source.build_vocab(train_data, min_freq=2)
target.build_vocab(train_data, min_freq=2)
print(f"Unique tokens in source (de) vocabulary: {len(source.vocab)}")
print(f"Unique tokens in target (en) vocabulary: {len(target.vocab)}")
BATCH_SIZE = 128
# pytorch boilerplate that determines whether a GPU is present or not;
# this determines whether our dataset or model can be moved to a GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# create batches out of the dataset and sends them to the appropriate device
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data), batch_size=BATCH_SIZE, device=device)
# pretend that we're iterating over the iterator and print out the first element
test_batch = next(iter(test_iterator))
test_batch
test_batch.src
# adjustable parameters
INPUT_DIM = len(source.vocab)
OUTPUT_DIM = len(target.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
class Encoder(nn.Module):
Input :
- source batch
Layer :
source batch -> Embedding -> LSTM
Output :
- LSTM hidden state
- LSTM cell state
    Parameters
    ----------
input_dim : int
Input dimension, should equal to the source vocab size.
emb_dim : int
Embedding layer's dimension.
hid_dim : int
LSTM Hidden/Cell state's dimension.
n_layers : int
Number of LSTM layers.
dropout : float
Dropout for the LSTM layer.
def __init__(self, input_dim: int, emb_dim: int, hid_dim: int, n_layers: int, dropout: float):
super().__init__()
self.emb_dim = emb_dim
self.hid_dim = hid_dim
self.input_dim = input_dim
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
def forward(self, src_batch: torch.LongTensor):
Parameters
----------
src_batch : 2d torch.LongTensor
Batched tokenized source sentence of shape [sent len, batch size].
Returns
-------
hidden, cell : 3d torch.LongTensor
Hidden and cell state of the LSTM layer. Each state's shape
[n layers * n directions, batch size, hidden dim]
embedded = self.embedding(src_batch) # [sent len, batch size, emb dim]
outputs, (hidden, cell) = self.rnn(embedded)
# outputs -> [sent len, batch size, hidden dim * n directions]
return hidden, cell
encoder = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT).to(device)
hidden, cell = encoder(test_batch.src)
hidden.shape, cell.shape
class Decoder(nn.Module):
Input :
- first token in the target batch
- LSTM hidden state from the encoder
- LSTM cell state from the encoder
Layer :
target batch -> Embedding --
|
encoder hidden state ------|--> LSTM -> Linear
|
encoder cell state -------
Output :
- prediction
- LSTM hidden state
- LSTM cell state
    Parameters
    ----------
    output_dim : int
Output dimension, should equal to the target vocab size.
emb_dim : int
Embedding layer's dimension.
hid_dim : int
LSTM Hidden/Cell state's dimension.
n_layers : int
Number of LSTM layers.
dropout : float
Dropout for the LSTM layer.
def __init__(self, output_dim: int, emb_dim: int, hid_dim: int, n_layers: int, dropout: float):
super().__init__()
self.emb_dim = emb_dim
self.hid_dim = hid_dim
self.output_dim = output_dim
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
self.out = nn.Linear(hid_dim, output_dim)
def forward(self, trg: torch.LongTensor, hidden: torch.FloatTensor, cell: torch.FloatTensor):
Parameters
----------
trg : 1d torch.LongTensor
Batched tokenized source sentence of shape [batch size].
hidden, cell : 3d torch.FloatTensor
Hidden and cell state of the LSTM layer. Each state's shape
[n layers * n directions, batch size, hidden dim]
Returns
-------
prediction : 2d torch.LongTensor
For each token in the batch, the predicted target vobulary.
Shape [batch size, output dim]
hidden, cell : 3d torch.FloatTensor
Hidden and cell state of the LSTM layer. Each state's shape
[n layers * n directions, batch size, hidden dim]
# [1, batch size, emb dim], the 1 serves as sent len
embedded = self.embedding(trg.unsqueeze(0))
outputs, (hidden, cell) = self.rnn(embedded, (hidden, cell))
prediction = self.out(outputs.squeeze(0))
return prediction, hidden, cell
decoder = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT).to(device)
# notice that we are not passing the entire the .trg
prediction, hidden, cell = decoder(test_batch.trg[0], hidden, cell)
prediction.shape, hidden.shape, cell.shape
class Seq2Seq(nn.Module):
def __init__(self, encoder: Encoder, decoder: Decoder, device: torch.device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
assert encoder.hid_dim == decoder.hid_dim, \
'Hidden dimensions of encoder and decoder must be equal!'
assert encoder.n_layers == decoder.n_layers, \
'Encoder and decoder must have equal number of layers!'
def forward(self, src_batch: torch.LongTensor, trg_batch: torch.LongTensor,
teacher_forcing_ratio: float=0.5):
max_len, batch_size = trg_batch.shape
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder's output
outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)
# last hidden & cell state of the encoder is used as the decoder's initial hidden state
hidden, cell = self.encoder(src_batch)
trg = trg_batch[0]
for i in range(1, max_len):
prediction, hidden, cell = self.decoder(trg, hidden, cell)
outputs[i] = prediction
if random.random() < teacher_forcing_ratio:
trg = trg_batch[i]
else:
trg = prediction.argmax(1)
return outputs
# note that this implementation assumes that the size of the hidden layer,
# and the number of layer are the same between the encoder and decoder
encoder = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
decoder = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
seq2seq = Seq2Seq(encoder, decoder, device).to(device)
seq2seq
outputs = seq2seq(test_batch.src, test_batch.trg)
outputs.shape
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(seq2seq):,} trainable parameters')
optimizer = optim.Adam(seq2seq.parameters())
# ignore the padding index when calculating the loss
PAD_IDX = target.vocab.stoi['<pad>']
criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX)
def train(seq2seq, iterator, optimizer, criterion):
seq2seq.train()
epoch_loss = 0
for batch in iterator:
optimizer.zero_grad()
outputs = seq2seq(batch.src, batch.trg)
# 1. as mentioned in the seq2seq section, we will
# cut off the first element when performing the evaluation
# 2. the loss function only works on 2d inputs
# with 1d targets we need to flatten each of them
outputs_flatten = outputs[1:].view(-1, outputs.shape[-1])
trg_flatten = batch.trg[1:].view(-1)
loss = criterion(outputs_flatten, trg_flatten)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(seq2seq, iterator, criterion):
seq2seq.eval()
epoch_loss = 0
with torch.no_grad():
for batch in iterator:
# turn off teacher forcing
outputs = seq2seq(batch.src, batch.trg, teacher_forcing_ratio=0)
# trg = [trg sent len, batch size]
# output = [trg sent len, batch size, output dim]
outputs_flatten = outputs[1:].view(-1, outputs.shape[-1])
trg_flatten = batch.trg[1:].view(-1)
loss = criterion(outputs_flatten, trg_flatten)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
N_EPOCHS = 20
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(seq2seq, train_iterator, optimizer, criterion)
valid_loss = evaluate(seq2seq, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(seq2seq.state_dict(), 'tut1-model.pt')
# it's easier to see a change in perplexity between epoch as it's an exponential
# of the loss, hence the scale of the measure is much bigger
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
seq2seq.load_state_dict(torch.load('tut1-model.pt'))
test_loss = evaluate(seq2seq, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
example_idx = 0
example = train_data.examples[example_idx]
print('source sentence: ', ' '.join(example.src))
print('target sentence: ', ' '.join(example.trg))
src_tensor = source.process([example.src]).to(device)
trg_tensor = target.process([example.trg]).to(device)
print(trg_tensor.shape)
seq2seq.eval()
with torch.no_grad():
outputs = seq2seq(src_tensor, trg_tensor, teacher_forcing_ratio=0)
outputs.shape
output_idx = outputs[1:].squeeze(1).argmax(1)
' '.join([target.vocab.itos[idx] for idx in output_idx])
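# A minimal greedy-decoding sketch for translating a raw, unseen German sentence
# instead of a pre-tokenized training example. The helper name, the max_len cap
# and the sample sentence are illustrative assumptions; it reuses the trained
# encoder/decoder and the source/target Fields defined above.
def translate_sentence(sentence, max_len=50):
    seq2seq.eval()
    # preprocess tokenizes (with reversal) and lowercases; process adds
    # <sos>/<eos> and numericalizes into a [sent len, 1] tensor
    src_tensor = source.process([source.preprocess(sentence)]).to(device)
    with torch.no_grad():
        hidden, cell = seq2seq.encoder(src_tensor)
    trg_indexes = [target.vocab.stoi['<sos>']]
    for _ in range(max_len):
        trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
        with torch.no_grad():
            prediction, hidden, cell = seq2seq.decoder(trg_tensor, hidden, cell)
        pred_token = prediction.argmax(1).item()
        if pred_token == target.vocab.stoi['<eos>']:
            break
        trg_indexes.append(pred_token)
    return ' '.join(target.vocab.itos[idx] for idx in trg_indexes[1:])

translate_sentence("Ein kleines Mädchen klettert in ein Spielhaus aus Holz.")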
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Seq2Seq
Step2: The next two code chunks
Step3: The tokenizer is language-specific, e.g. it knows that in English "don't" should be tokenized into "do" and "n't".
Step4: Constructing Dataset
Step5: Upon loading the dataset, we can index and iterate over the Dataset like a normal list. Each element in the dataset bundles the attributes of a single record for us. We can index our dataset like a list and then access the .src and .trg attributes to take a look at the tokenized source and target sentences.
Step6: The next missing piece is to build the vocabulary for the source and target languages, so that we can convert our tokens into integers that can be fed into downstream models. Constructing the vocabulary and the word-to-integer mapping is done by calling the build_vocab method of a Field on a dataset. This adds the vocab attribute to the field.
Step7: Constructing Iterator
Step8: If we list out the first batch, we see that each element of the iterator is a Batch object; similar to an element of a Dataset, we can access its fields via attributes. The next important thing to note is that it is of size [sentence length, batch size], and the longest sentence in the first batch of the source language has a length of 10.
Step11: To define our seq2seq model, we first specify the encoder and decoder separately.
Step14: Decoder Module
Step15: Seq2Seq Module
Step16: Training Seq2Seq
Step17: Evaluating Seq2Seq
Step18: Here, we pick an example from our dataset and print out the original source and target sentence. Then we take a look at the "predicted" target sentence generated by the model.
|
5,495
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
AIP_CLIENT_WHEEL = "aiplatform_pipelines_client-0.1.0.caip20201123-py3-none-any.whl"
AIP_CLIENT_WHEEL_GCS_LOCATION = (
f"gs://cloud-aiplatform-pipelines/releases/20201123/{AIP_CLIENT_WHEEL}"
)
!gsutil cp {AIP_CLIENT_WHEEL_GCS_LOCATION} {AIP_CLIENT_WHEEL}
%pip install {AIP_CLIENT_WHEEL}
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import logging
import tensorflow as tf
import tfx
from aiplatform.pipelines import client
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
print("TFX Version: ", tfx.__version__)
PROJECT_ID = "jk-mlops-dev" # <---CHANGE THIS
PROJECT_NUMBER = "895222332033" # <---CHANGE THIS
API_KEY = "AIzaSyBS_RiaK3liaVthTUD91XuPDKIbiwDFlV8" # <---CHANGE THIS
USER = "user" # <---CHANGE THIS
BUCKET_NAME = "jk-ann-staging" # <---CHANGE THIS
VPC_NAME = "default" # <---CHANGE THIS IF USING A DIFFERENT VPC
REGION = "us-central1"
PIPELINE_NAME = "ann-pipeline-{}".format(USER)
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(BUCKET_NAME, PIPELINE_NAME)
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
print("PIPELINE_ROOT: {}".format(PIPELINE_ROOT))
component_folder = "bq_components"
if tf.io.gfile.exists(component_folder):
print("Removing older file")
tf.io.gfile.rmtree(component_folder)
print("Creating component folder")
tf.io.gfile.mkdir(component_folder)
%cd {component_folder}
%%writefile ann_types.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Custom types for managing ANN artifacts.
from tfx.types import artifact
class ANNIndex(artifact.Artifact):
TYPE_NAME = 'ANNIndex'
class DeployedANNIndex(artifact.Artifact):
TYPE_NAME = 'DeployedANNIndex'
%%writefile ann_service.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Helper classes encapsulating ANN Service REST API.
import datetime
import logging
import json
import time
import google.auth
class ANNClient(object):
Base ANN Service client.
def __init__(self, project_id, project_number, region):
credentials, _ = google.auth.default()
self.authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
self.ann_endpoint = f'{region}-aiplatform.googleapis.com'
self.ann_parent = f'https://{self.ann_endpoint}/v1alpha1/projects/{project_id}/locations/{region}'
self.project_id = project_id
self.project_number = project_number
self.region = region
def wait_for_completion(self, operation_id, message, sleep_time):
        Waits for completion of a long-running operation.
api_url = f'{self.ann_parent}/operations/{operation_id}'
start_time = datetime.datetime.utcnow()
while True:
response = self.authed_session.get(api_url)
if response.status_code != 200:
raise RuntimeError(response.json())
if 'done' in response.json().keys():
logging.info('Operation completed!')
break
elapsed_time = datetime.datetime.utcnow() - start_time
logging.info('{}. Elapsed time since start: {}.'.format(
message, str(elapsed_time)))
time.sleep(sleep_time)
return response.json()['response']
class IndexClient(ANNClient):
Encapsulates a subset of control plane APIs
that manage ANN indexes.
def __init__(self, project_id, project_number, region):
super().__init__(project_id, project_number, region)
def create_index(self, display_name, description, metadata):
Creates an ANN Index.
api_url = f'{self.ann_parent}/indexes'
request_body = {
'display_name': display_name,
'description': description,
'metadata': metadata
}
response = self.authed_session.post(api_url, data=json.dumps(request_body))
if response.status_code != 200:
raise RuntimeError(response.text)
operation_id = response.json()['name'].split('/')[-1]
return operation_id
def list_indexes(self, display_name=None):
Lists all indexes with a given display name or
all indexes if the display_name is not provided.
if display_name:
api_url = f'{self.ann_parent}/indexes?filter=display_name="{display_name}"'
else:
api_url = f'{self.ann_parent}/indexes'
response = self.authed_session.get(api_url).json()
return response['indexes'] if response else []
def delete_index(self, index_id):
Deletes an ANN index.
api_url = f'{self.ann_parent}/indexes/{index_id}'
response = self.authed_session.delete(api_url)
if response.status_code != 200:
raise RuntimeError(response.text)
class IndexDeploymentClient(ANNClient):
Encapsulates a subset of control plane APIs
that manage ANN endpoints and deployments.
def __init__(self, project_id, project_number, region):
super().__init__(project_id, project_number, region)
def create_endpoint(self, display_name, vpc_name):
Creates an ANN endpoint.
api_url = f'{self.ann_parent}/indexEndpoints'
network_name = f'projects/{self.project_number}/global/networks/{vpc_name}'
request_body = {
'display_name': display_name,
'network': network_name
}
response = self.authed_session.post(api_url, data=json.dumps(request_body))
if response.status_code != 200:
raise RuntimeError(response.text)
operation_id = response.json()['name'].split('/')[-1]
return operation_id
def list_endpoints(self, display_name=None):
Lists all ANN endpoints with a given display name or
all endpoints in the project if the display_name is not provided.
if display_name:
api_url = f'{self.ann_parent}/indexEndpoints?filter=display_name="{display_name}"'
else:
api_url = f'{self.ann_parent}/indexEndpoints'
response = self.authed_session.get(api_url).json()
return response['indexEndpoints'] if response else []
def delete_endpoint(self, endpoint_id):
Deletes an ANN endpoint.
api_url = f'{self.ann_parent}/indexEndpoints/{endpoint_id}'
response = self.authed_session.delete(api_url)
if response.status_code != 200:
raise RuntimeError(response.text)
return response.json()
def create_deployment(self, display_name, deployment_id, endpoint_id, index_id):
Deploys an ANN index to an endpoint.
api_url = f'{self.ann_parent}/indexEndpoints/{endpoint_id}:deployIndex'
index_name = f'projects/{self.project_number}/locations/{self.region}/indexes/{index_id}'
request_body = {
'deployed_index': {
'id': deployment_id,
'index': index_name,
'display_name': display_name
}
}
response = self.authed_session.post(api_url, data=json.dumps(request_body))
if response.status_code != 200:
raise RuntimeError(response.text)
operation_id = response.json()['name'].split('/')[-1]
return operation_id
def get_deployment_grpc_ip(self, endpoint_id, deployment_id):
Returns a private IP address for a gRPC interface to
an Index deployment.
api_url = f'{self.ann_parent}/indexEndpoints/{endpoint_id}'
response = self.authed_session.get(api_url)
if response.status_code != 200:
raise RuntimeError(response.text)
endpoint_ip = None
if 'deployedIndexes' in response.json().keys():
for deployment in response.json()['deployedIndexes']:
if deployment['id'] == deployment_id:
endpoint_ip = deployment['privateEndpoints']['matchGrpcAddress']
return endpoint_ip
def delete_deployment(self, endpoint_id, deployment_id):
        Undeploys an index from an endpoint.
api_url = f'{self.ann_parent}/indexEndpoints/{endpoint_id}:undeployIndex'
request_body = {
'deployed_index_id': deployment_id
}
response = self.authed_session.post(api_url, data=json.dumps(request_body))
if response.status_code != 200:
raise RuntimeError(response.text)
return response
%%writefile compute_pmi.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BigQuery compute PMI component.
import logging
from google.cloud import bigquery
import tfx
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset as BQDataset
@component
def compute_pmi(
project_id: Parameter[str],
bq_dataset: Parameter[str],
min_item_frequency: Parameter[int],
max_group_size: Parameter[int],
item_cooc: OutputArtifact[BQDataset]):
stored_proc = f'{bq_dataset}.sp_ComputePMI'
query = f'''
DECLARE min_item_frequency INT64;
DECLARE max_group_size INT64;
SET min_item_frequency = {min_item_frequency};
SET max_group_size = {max_group_size};
CALL {stored_proc}(min_item_frequency, max_group_size);
'''
result_table = 'item_cooc'
    logging.info('Starting PMI computation...')
client = bigquery.Client(project=project_id)
query_job = client.query(query)
query_job.result() # Wait for the job to complete
logging.info(f'Items PMI computation completed. Output in {bq_dataset}.{result_table}.')
# Write the location of the output table to metadata.
item_cooc.set_string_custom_property('table_name',
f'{project_id}:{bq_dataset}.{result_table}')
%%writefile train_item_matching.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BigQuery train item matching model component.
import logging
from google.cloud import bigquery
import tfx
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset as BQDataset
from tfx.types.standard_artifacts import Model as BQModel
@component
def train_item_matching_model(
project_id: Parameter[str],
bq_dataset: Parameter[str],
dimensions: Parameter[int],
item_cooc: InputArtifact[BQDataset],
bq_model: OutputArtifact[BQModel]):
item_cooc_table = item_cooc.get_string_custom_property('table_name')
stored_proc = f'{bq_dataset}.sp_TrainItemMatchingModel'
query = f'''
DECLARE dimensions INT64 DEFAULT {dimensions};
CALL {stored_proc}(dimensions);
'''
model_name = 'item_matching_model'
    logging.info(f'Using item co-occurrence table: {item_cooc_table}')
logging.info(f'Starting training of the model...')
client = bigquery.Client(project=project_id)
query_job = client.query(query)
query_job.result()
logging.info(f'Model training completed. Output in {bq_dataset}.{model_name}.')
# Write the location of the model to metadata.
bq_model.set_string_custom_property('model_name',
f'{project_id}:{bq_dataset}.{model_name}')
%%writefile extract_embeddings.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Extracts embeddings to a BQ table.
import logging
from google.cloud import bigquery
import tfx
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset as BQDataset
from tfx.types.standard_artifacts import Model as BQModel
@component
def extract_embeddings(
project_id: Parameter[str],
bq_dataset: Parameter[str],
bq_model: InputArtifact[BQModel],
item_embeddings: OutputArtifact[BQDataset]):
embedding_model_name = bq_model.get_string_custom_property('model_name')
stored_proc = f'{bq_dataset}.sp_ExractEmbeddings'
query = f'''
CALL {stored_proc}();
'''
embeddings_table = 'item_embeddings'
logging.info(f'Extracting item embeddings from: {embedding_model_name}')
client = bigquery.Client(project=project_id)
query_job = client.query(query)
query_job.result() # Wait for the job to complete
logging.info(f'Embeddings extraction completed. Output in {bq_dataset}.{embeddings_table}')
# Write the location of the output table to metadata.
item_embeddings.set_string_custom_property('table_name',
f'{project_id}:{bq_dataset}.{embeddings_table}')
%%writefile export_embeddings.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Exports embeddings from a BQ table to a GCS location.
import logging
from google.cloud import bigquery
import tfx
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset
BQDataset = Dataset
@component
def export_embeddings(
project_id: Parameter[str],
gcs_location: Parameter[str],
item_embeddings_bq: InputArtifact[BQDataset],
item_embeddings_gcs: OutputArtifact[Dataset]):
filename_pattern = 'embedding-*.json'
gcs_location = gcs_location.rstrip('/')
destination_uri = f'{gcs_location}/{filename_pattern}'
_, table_name = item_embeddings_bq.get_string_custom_property('table_name').split(':')
logging.info(f'Exporting item embeddings from: {table_name}')
bq_dataset, table_id = table_name.split('.')
client = bigquery.Client(project=project_id)
dataset_ref = bigquery.DatasetReference(project_id, bq_dataset)
table_ref = dataset_ref.table(table_id)
job_config = bigquery.job.ExtractJobConfig()
job_config.destination_format = bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON
extract_job = client.extract_table(
table_ref,
destination_uris=destination_uri,
job_config=job_config
)
    extract_job.result()  # Wait for results
logging.info(f'Embeddings export completed. Output in {gcs_location}')
# Write the location of the embeddings to metadata.
item_embeddings_gcs.uri = gcs_location
%%writefile create_index.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Creates an ANN index.
import logging
import google.auth
import numpy as np
import tfx
import tensorflow as tf
from google.cloud import bigquery
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset
from ann_service import IndexClient
from ann_types import ANNIndex
NUM_NEIGHBOURS = 10
MAX_LEAVES_TO_SEARCH = 200
METRIC = 'DOT_PRODUCT_DISTANCE'
FEATURE_NORM_TYPE = 'UNIT_L2_NORM'
CHILD_NODE_COUNT = 1000
APPROXIMATE_NEIGHBORS_COUNT = 50
@component
def create_index(
project_id: Parameter[str],
project_number: Parameter[str],
region: Parameter[str],
display_name: Parameter[str],
dimensions: Parameter[int],
item_embeddings: InputArtifact[Dataset],
ann_index: OutputArtifact[ANNIndex]):
index_client = IndexClient(project_id, project_number, region)
logging.info('Creating index:')
logging.info(f' Index display name: {display_name}')
logging.info(f' Embeddings location: {item_embeddings.uri}')
index_description = display_name
index_metadata = {
'contents_delta_uri': item_embeddings.uri,
'config': {
'dimensions': dimensions,
'approximate_neighbors_count': APPROXIMATE_NEIGHBORS_COUNT,
'distance_measure_type': METRIC,
'feature_norm_type': FEATURE_NORM_TYPE,
'tree_ah_config': {
'child_node_count': CHILD_NODE_COUNT,
'max_leaves_to_search': MAX_LEAVES_TO_SEARCH
}
}
}
operation_id = index_client.create_index(display_name,
index_description,
index_metadata)
response = index_client.wait_for_completion(operation_id, 'Waiting for ANN index', 45)
index_name = response['name']
logging.info('Index {} created.'.format(index_name))
# Write the index name to metadata.
ann_index.set_string_custom_property('index_name',
index_name)
ann_index.set_string_custom_property('index_display_name',
display_name)
%%writefile deploy_index.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Deploys an ANN index.
import logging
import numpy as np
import uuid
import tfx
import tensorflow as tf
from google.cloud import bigquery
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset
from ann_service import IndexDeploymentClient
from ann_types import ANNIndex
from ann_types import DeployedANNIndex
@component
def deploy_index(
project_id: Parameter[str],
project_number: Parameter[str],
region: Parameter[str],
vpc_name: Parameter[str],
deployed_index_id_prefix: Parameter[str],
ann_index: InputArtifact[ANNIndex],
deployed_ann_index: OutputArtifact[DeployedANNIndex]
):
deployment_client = IndexDeploymentClient(project_id,
project_number,
region)
index_name = ann_index.get_string_custom_property('index_name')
index_display_name = ann_index.get_string_custom_property('index_display_name')
endpoint_display_name = f'Endpoint for {index_display_name}'
logging.info(f'Creating endpoint: {endpoint_display_name}')
operation_id = deployment_client.create_endpoint(endpoint_display_name, vpc_name)
response = deployment_client.wait_for_completion(operation_id, 'Waiting for endpoint', 30)
endpoint_name = response['name']
logging.info(f'Endpoint created: {endpoint_name}')
endpoint_id = endpoint_name.split('/')[-1]
index_id = index_name.split('/')[-1]
deployed_index_display_name = f'Deployed {index_display_name}'
deployed_index_id = deployed_index_id_prefix + str(uuid.uuid4())
logging.info(f'Creating deployed index: {deployed_index_id}')
logging.info(f' from: {index_name}')
operation_id = deployment_client.create_deployment(
deployed_index_display_name,
deployed_index_id,
endpoint_id,
index_id)
response = deployment_client.wait_for_completion(operation_id, 'Waiting for deployment', 60)
logging.info('Index deployed!')
deployed_index_ip = deployment_client.get_deployment_grpc_ip(
endpoint_id, deployed_index_id
)
# Write the deployed index properties to metadata.
deployed_ann_index.set_string_custom_property('endpoint_name',
endpoint_name)
deployed_ann_index.set_string_custom_property('deployed_index_id',
deployed_index_id)
deployed_ann_index.set_string_custom_property('index_name',
index_name)
deployed_ann_index.set_string_custom_property('deployed_index_grpc_ip',
deployed_index_ip)
import os
from compute_pmi import compute_pmi
from create_index import create_index
from deploy_index import deploy_index
from export_embeddings import export_embeddings
from extract_embeddings import extract_embeddings
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner
# Only required for local run.
from tfx.orchestration.metadata import sqlite_metadata_connection_config
from tfx.orchestration.pipeline import Pipeline
from train_item_matching import train_item_matching_model
def ann_pipeline(
pipeline_name,
pipeline_root,
metadata_connection_config,
project_id,
project_number,
region,
vpc_name,
bq_dataset_name,
min_item_frequency,
max_group_size,
dimensions,
embeddings_gcs_location,
index_display_name,
deployed_index_id_prefix,
) -> Pipeline:
Implements the SCANN training pipeline.
pmi_computer = compute_pmi(
project_id=project_id,
bq_dataset=bq_dataset_name,
min_item_frequency=min_item_frequency,
max_group_size=max_group_size,
)
bqml_trainer = train_item_matching_model(
project_id=project_id,
bq_dataset=bq_dataset_name,
item_cooc=pmi_computer.outputs.item_cooc,
dimensions=dimensions,
)
embeddings_extractor = extract_embeddings(
project_id=project_id,
bq_dataset=bq_dataset_name,
bq_model=bqml_trainer.outputs.bq_model,
)
embeddings_exporter = export_embeddings(
project_id=project_id,
gcs_location=embeddings_gcs_location,
item_embeddings_bq=embeddings_extractor.outputs.item_embeddings,
)
index_constructor = create_index(
project_id=project_id,
project_number=project_number,
region=region,
display_name=index_display_name,
dimensions=dimensions,
item_embeddings=embeddings_exporter.outputs.item_embeddings_gcs,
)
index_deployer = deploy_index(
project_id=project_id,
project_number=project_number,
region=region,
vpc_name=vpc_name,
deployed_index_id_prefix=deployed_index_id_prefix,
ann_index=index_constructor.outputs.ann_index,
)
components = [
pmi_computer,
bqml_trainer,
embeddings_extractor,
embeddings_exporter,
index_constructor,
index_deployer,
]
return Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
# Only needed for local runs.
metadata_connection_config=metadata_connection_config,
components=components,
)
pipeline_root = f"/tmp/{PIPELINE_NAME}"
local_mlmd_folder = "/tmp/mlmd"
if tf.io.gfile.exists(pipeline_root):
print("Removing previous artifacts...")
tf.io.gfile.rmtree(pipeline_root)
if tf.io.gfile.exists(local_mlmd_folder):
print("Removing local mlmd SQLite...")
tf.io.gfile.rmtree(local_mlmd_folder)
print("Creating mlmd directory: ", local_mlmd_folder)
tf.io.gfile.mkdir(local_mlmd_folder)
print("Creating pipeline root folder: ", pipeline_root)
tf.io.gfile.mkdir(pipeline_root)
bq_dataset_name = "song_embeddings"
index_display_name = "Song embeddings"
deployed_index_id_prefix = "deployed_song_embeddings_"
min_item_frequency = 15
max_group_size = 100
dimensions = 50
embeddings_gcs_location = f"gs://{BUCKET_NAME}/embeddings"
metadata_connection_config = sqlite_metadata_connection_config(
os.path.join(local_mlmd_folder, "metadata.sqlite")
)
pipeline = ann_pipeline(
pipeline_name=PIPELINE_NAME,
pipeline_root=pipeline_root,
metadata_connection_config=metadata_connection_config,
project_id=PROJECT_ID,
project_number=PROJECT_NUMBER,
region=REGION,
vpc_name=VPC_NAME,
bq_dataset_name=bq_dataset_name,
index_display_name=index_display_name,
deployed_index_id_prefix=deployed_index_id_prefix,
min_item_frequency=min_item_frequency,
max_group_size=max_group_size,
dimensions=dimensions,
embeddings_gcs_location=embeddings_gcs_location,
)
# BeamDagRunner is needed for the local run below (import path for TFX 0.25).
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
logging.getLogger().setLevel(logging.INFO)
BeamDagRunner().run(pipeline)
from ml_metadata import metadata_store
from ml_metadata.proto import metadata_store_pb2
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.filename_uri = os.path.join(
local_mlmd_folder, "metadata.sqlite"
)
connection_config.sqlite.connection_mode = 3 # READWRITE_OPENCREATE
store = metadata_store.MetadataStore(connection_config)
store.get_artifacts()
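# Optional, hedged follow-up: list a single artifact type instead of everything.
# The type name below is an assumption -- use whichever name the custom
# components registered (it is visible in the get_artifacts() output above).
# ann_index_artifacts = store.get_artifacts_by_type('ANNIndex')
# [a.uri for a in ann_index_artifacts]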
%%writefile Dockerfile
FROM gcr.io/tfx-oss-public/tfx:0.25.0
WORKDIR /pipeline
COPY ./ ./
ENV PYTHONPATH="/pipeline:${PYTHONPATH}"
!gcloud builds submit --tag gcr.io/{PROJECT_ID}/caip-tfx-custom:{USER} .
from aiplatform.pipelines import client
aipp_client = client.Client(project_id=PROJECT_ID, region=REGION, api_key=API_KEY)
metadata_connection_config = None
pipeline_root = PIPELINE_ROOT
pipeline = ann_pipeline(
pipeline_name=PIPELINE_NAME,
pipeline_root=pipeline_root,
metadata_connection_config=metadata_connection_config,
project_id=PROJECT_ID,
project_number=PROJECT_NUMBER,
region=REGION,
vpc_name=VPC_NAME,
bq_dataset_name=bq_dataset_name,
index_display_name=index_display_name,
deployed_index_id_prefix=deployed_index_id_prefix,
min_item_frequency=min_item_frequency,
max_group_size=max_group_size,
dimensions=dimensions,
embeddings_gcs_location=embeddings_gcs_location,
)
config = kubeflow_v2_dag_runner.KubeflowV2DagRunnerConfig(
project_id=PROJECT_ID,
display_name=PIPELINE_NAME,
default_image="gcr.io/{}/caip-tfx-custom:{}".format(PROJECT_ID, USER),
)
runner = kubeflow_v2_dag_runner.KubeflowV2DagRunner(
config=config, output_filename="pipeline.json"
)
runner.compile(pipeline, write_out=True)
aipp_client.create_run_from_job_spec("pipeline.json")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up the notebook's environment
Step2: Restart the kernel.
Step3: Import notebook dependencies
Step4: Configure GCP environment
Step5: Defining custom components
Step7: Define custom types for ANN service artifacts
Step22: Create a wrapper around ANN Service REST API
Step24: Create Compute PMI component
Step26: Create Train Item Matching Model component
Step28: Create Extract Embeddings component
Step30: Create Export Embeddings component
Step32: Create ANN index component
Step34: Deploy ANN index component
Step36: Creating a TFX pipeline
Step37: Testing the pipeline locally
Step38: Set pipeline parameters and create the pipeline
Step39: Start the run
Step40: Inspect produced metadata
Step41: NOTICE. The following code does not work with ANN Service Experimental. It will be finalized when the service moves to the Preview stage.
Step42: Build and push the docker image to Container Registry
Step43: Create AI Platform Pipelines client
Step44: Set the parameters for AIPP execution and create the pipeline
Step45: Compile the pipeline
Step46: Submit the pipeline run
|
5,496
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from ipywidgets import interact, interactive, fixed  # formerly IPython.html.widgets
from IPython.display import display
def soliton(x, t, c, a):
    """Return phi(x, t) for a soliton wave with constants c and a."""
p=.5*c*((1/np.cosh((c**.5)/2*(x-c*t-a)))**2)
return p
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
phi=np.ones([xpoints,tpoints])
for i in range(xpoints):
    for j in range(tpoints):
        phi[i, j] = soliton(x[i], t[j], c, a)
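# Equivalent vectorized construction (added illustration; builds the same array):
xg, tg = np.meshgrid(x, t, indexing='ij')
assert np.allclose(phi, soliton(xg, tg, c, a))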
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
def plot_soliton_data(i=0):
    """Plot the soliton data at t[i] versus x."""
    plt.plot(x, soliton(x, t[i], c, a))
    plt.xlabel('x')
    plt.ylabel('phi(x, t[i])')
    plt.ylim(0, .6)
    plt.title('Soliton wave phi(x, t[i])')
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
interactive(plot_soliton_data,i=(0,100,10))
assert True # leave this for grading the interact with plot_soliton_data cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Using interact for animation with data
Step3: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays
Step4: Compute a 2d NumPy array called phi
Step6: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
Step7: Use interact to animate the plot_soliton_data function versus time.
|
5,497
|
<ASSISTANT_TASK:>
Python Code:
# from __future__ import exam_success
from __future__ import absolute_import
from __future__ import print_function
# Standard imports
%matplotlib inline
import os
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import random
import pandas as pd
import scipy.stats as stats
# Sk cheats
from sklearn.cross_validation import cross_val_score
from sklearn import grid_search
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
#from sklearn.preprocessing import Imputer # get rid of nan
from sklearn.decomposition import NMF # to add features based on the latent representation
from sklearn.decomposition import ProjectedGradientNMF
# Faster gradient boosting
import xgboost as xgb
# For neural networks models
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop
%%time
#filename = "data/train.csv"
filename = "data/reduced_train_100000.csv"
#filename = "data/reduced_train_1000000.csv"
raw = pd.read_csv(filename)
raw = raw.set_index('Id')
raw.columns
raw['Expected'].describe()
# Considering that the gauge may concentrate the rainfall, we cap the target (300 mm here; raise to 1000 for a looser cap)
# Comment this line to analyse the complete dataset
l = len(raw)
raw = raw[raw['Expected'] < 300] #1000
print("Dropped %d (%0.2f%%)"%(l-len(raw),(l-len(raw))/float(l)*100))
raw.head(5)
raw.describe()
# We select all features except for the minutes past,
# because we ignore the time repartition of the sequence for now
features_columns = list([u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
def getXy(raw):
selected_columns = list([ u'minutes_past',u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
data = raw[selected_columns]
docX, docY = [], []
for i in data.index.unique():
if isinstance(data.loc[i],pd.core.series.Series):
m = [data.loc[i].as_matrix()]
docX.append(m)
docY.append(float(raw.loc[i]["Expected"]))
else:
m = data.loc[i].as_matrix()
docX.append(m)
docY.append(float(raw.loc[i][:1]["Expected"]))
X , y = np.array(docX) , np.array(docY)
return X,y
#noAnyNan = raw.loc[raw[features_columns].dropna(how='any').index.unique()]
noAnyNan = raw.dropna()
noFullNan = raw.loc[raw[features_columns].dropna(how='all').index.unique()]
fullNan = raw.drop(raw[features_columns].dropna(how='all').index)
print(len(raw))
print(len(noAnyNan))
print(len(noFullNan))
print(len(fullNan))
%%time
#X,y=getXy(noAnyNan)
X,y=getXy(noFullNan)
#%%time
#XX = [np.array(t).mean(0) for t in X]
#XX = np.array([np.append(np.nanmean(np.array(t),0),(np.array(t)[1:] - np.array(t)[:-1]).sum(0) ) for t in X])
XX=[]
for t in X:
nm = np.nanmean(t,0)
for idx,j in enumerate(nm):
if np.isnan(j):
nm[idx]=global_means[idx]
XX.append(nm)
XX=np.array(XX)
# rescale to clip min at 0 (for non negative matrix factorization)
XX_rescaled=XX[:,:]-np.min(XX,0)
%%time
nn = ProjectedGradientNMF()
W = nn.fit_transform(XX_rescaled)
#H = nn.components_
global_means = np.nanmean(noFullNan,0)
XX=[]
for t in X:
nm = np.nanmean(t,0)
for idx,j in enumerate(nm):
if np.isnan(j):
nm[idx]=global_means[idx]
XX.append(nm)
XX=np.array(XX)
# rescale to clip min at 0 (for non negative matrix factorization)
XX_rescaled=XX[:,:]-np.min(XX,0)
nmf = NMF(max_iter=1000)
W = nmf.fit_transform(XX_rescaled)
#H = nn.components_
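# W holds the non-negative latent representation of each averaged sequence
# (one row per Id, one column per NMF component); it is appended as extra
# features inside addFeatures via the mf argument.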
# used to fill fully empty data
global_means = np.nanmean(noFullNan,0)
# Reduce the sequence structure of the data and produce
# new, hopefully informative, features.
def addFeatures(X, mf=None):
# used to fill fully empty data
#global_means = np.nanmean(X,0)
XX=[]
nbFeatures=float(len(X[0][0]))
for idxt,t in enumerate(X):
# compute means, ignoring nan when possible, marking it when fully filled with nan
nm = np.nanmean(t,0)
tt=[]
for idx,j in enumerate(nm):
if np.isnan(j):
nm[idx]=global_means[idx]
tt.append(1)
else:
tt.append(0)
tmp = np.append(nm,np.append(tt,tt.count(0)/nbFeatures))
# faster if working on fully filled data:
#tmp = np.append(np.nanmean(np.array(t),0),(np.array(t)[1:] - np.array(t)[:-1]).sum(0) )
# add the percentiles
tmp = np.append(tmp,np.nanpercentile(t,10,axis=0))
tmp = np.append(tmp,np.nanpercentile(t,50,axis=0))
tmp = np.append(tmp,np.nanpercentile(t,90,axis=0))
for idx,i in enumerate(tmp):
if np.isnan(i):
tmp[idx]=0
# adding the dbz as a feature
test = t
try:
taa=test[:,0]
except TypeError:
taa=[test[0][0]]
valid_time = np.zeros_like(taa)
valid_time[0] = taa[0]
for n in xrange(1,len(taa)):
valid_time[n] = taa[n] - taa[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
sum=0
try:
column_ref=test[:,2]
except TypeError:
column_ref=[test[0][2]]
for dbz, hours in zip(column_ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
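# This is the standard Marshall-Palmer Z-R relation: Z = 200 * R**1.6 with
# Z = 10**(dBZ/10), hence R = (10**(dBZ/10) / 200)**0.625 mm/h, accumulated
# over the fraction of the hour each radar scan is valid for.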
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
if mf is not None:
tmp = np.append(tmp,mf[idxt])
XX.append(np.append(np.array(sum),tmp))
#XX.append(np.array([sum]))
#XX.append(tmp)
return XX
%%time
XX=addFeatures(X,mf=W)
#XX=addFeatures(X)
def splitTrainTest(X, y, split=0.2):
tmp1, tmp2 = [], []
ps = int(len(X) * (1-split))
index_shuf = range(len(X))
random.shuffle(index_shuf)
for i in index_shuf:
tmp1.append(X[i])
tmp2.append(y[i])
return tmp1[:ps], tmp2[:ps], tmp1[ps:], tmp2[ps:]
X_train,y_train, X_test, y_test = splitTrainTest(XX,y)
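# Alternative sketch (not in the original): scikit-learn offers the same split.
# Note the different return order compared to splitTrainTest above.
# from sklearn.cross_validation import train_test_split  # sklearn.model_selection on newer versions
# X_train, X_test, y_train, y_test = train_test_split(XX, y, test_size=0.2)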
def manualScorer(estimator, X, y):
    # Negative mean squared error on the data the search passes in (higher is better).
    err = (estimator.predict(X) - y)**2
    return -err.sum()/len(err)
svr = SVR(kernel='rbf', C=800.0)
%%time
srv = svr.fit(X_train,y_train)
print(svr.score(X_train,y_train))
print(svr.score(X_test,y_test))
err = (svr.predict(X_train)-y_train)**2
err.sum()/len(err)
err = (svr.predict(X_test)-y_test)**2
err.sum()/len(err)
%%time
svr_score = cross_val_score(svr, XX, y, cv=5)
print("Score: %s\nMean: %.03f"%(svr_score,svr_score.mean()))
knn = KNeighborsRegressor(n_neighbors=6,weights='distance',algorithm='ball_tree')
#parameters = {'weights':('distance','uniform'),'algorithm':('auto', 'ball_tree', 'kd_tree', 'brute')}
parameters = {'n_neighbors':range(1,10,1)}
grid_knn = grid_search.GridSearchCV(knn, parameters,scoring=manualScorer)
%%time
grid_knn.fit(X_train,y_train)
print(grid_knn.grid_scores_)
print("Best: ",grid_knn.best_params_)
knn = grid_knn.best_estimator_
knn= knn.fit(X_train,y_train)
print(knn.score(X_train,y_train))
print(knn.score(X_test,y_test))
err = (knn.predict(X_train)-y_train)**2
err.sum()/len(err)
err = (knn.predict(X_test)-y_test)**2
err.sum()/len(err)
etreg = ExtraTreesRegressor(n_estimators=200, max_depth=None, min_samples_split=1, random_state=0,n_jobs=4)
parameters = {'n_estimators':range(100,200,10)}
grid_rf = grid_search.GridSearchCV(etreg, parameters,n_jobs=4,scoring=manualScorer)
%%time
grid_rf.fit(X_train,y_train)
print(grid_rf.grid_scores_)
print("Best: ",grid_rf.best_params_)
grid_rf.best_params_
#etreg = grid_rf.best_estimator_
%%time
etreg = etreg.fit(X_train,y_train)
print(etreg.score(X_train,y_train))
print(etreg.score(X_test,y_test))
err = (etreg.predict(X_train)-y_train)**2
err.sum()/len(err)
err = (etreg.predict(X_test)-y_test)**2
err.sum()/len(err)
rfr = RandomForestRegressor(n_estimators=200, criterion='mse', max_depth=None, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1,
random_state=None, verbose=0, warm_start=False)
%%time
rfr = rfr.fit(X_train,y_train)
print(rfr.score(X_train,y_train))
print(rfr.score(X_test,y_test))
# the dbz feature does not influence xgbr so much
xgbr = xgb.XGBRegressor(max_depth=6, learning_rate=0.1, n_estimators=700, silent=True,
objective='reg:linear', nthread=-1, gamma=0, min_child_weight=1,
max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5,
seed=0, missing=None)
%%time
xgbr = xgbr.fit(X_train,y_train)
# without the nmf features
# print(xgbr.score(X_train,y_train))
## 0.993948231144
# print(xgbr.score(X_test,y_test))
## 0.613931733332
# with nmf features
print(xgbr.score(X_train,y_train))
print(xgbr.score(X_test,y_test))
gbr = GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=900,
subsample=1.0, min_samples_split=2, min_samples_leaf=1, max_depth=4, init=None,
random_state=None, max_features=None, alpha=0.5,
verbose=0, max_leaf_nodes=None, warm_start=False)
%%time
gbr = gbr.fit(X_train,y_train)
#os.system('say "終わりだ"') #its over!
#parameters = {'max_depth':range(2,5,1),'alpha':[0.5,0.6,0.7,0.8,0.9]}
#parameters = {'subsample':[0.2,0.4,0.5,0.6,0.8,1]}
#parameters = {'subsample':[0.2,0.5,0.6,0.8,1],'n_estimators':[800,1000,1200]}
#parameters = {'max_depth':range(2,4,1)}
parameters = {'n_estimators':[400,800,1100]}
#parameters = {'loss':['ls', 'lad', 'huber', 'quantile'],'alpha':[0.3,0.5,0.8,0.9]}
#parameters = {'learning_rate':[0.1,0.5,0.9]}
grid_gbr = grid_search.GridSearchCV(gbr, parameters,n_jobs=2,scoring=manualScorer)
%%time
grid_gbr = grid_gbr.fit(X_train,y_train)
print(grid_gbr.grid_scores_)
print("Best: ",grid_gbr.best_params_)
print(gbr.score(X_train,y_train))
print(gbr.score(X_test,y_test))
err = (gbr.predict(X_train)-y_train)**2
print(err.sum()/len(err))
err = (gbr.predict(X_test)-y_test)**2
print(err.sum()/len(err))
err = (gbr.predict(X_train)-y_train)**2
print(err.sum()/len(err))
err = (gbr.predict(X_test)-y_test)**2
print(err.sum()/len(err))
t = []
for i in XX:
t.append(np.count_nonzero(~np.isnan(i)) / float(i.size))
pd.DataFrame(np.array(t)).describe()
svr.predict(X_test)
s = modelList[0]
t.mean(1)
modelList = [svr,knn,etreg,rfr,xgbr,gbr]
score_train = [[str(f).split("(")[0],f.score(X_train,y_train)] for f in modelList]
score_test = [[str(f).split("(")[0],f.score(X_test,y_test)] for f in modelList]
for idx,i in enumerate(score_train):
print(i[0])
print(" train: %.03f"%i[1])
print(" test: %.03f"%score_test[idx][1])
globalPred = np.array([f.predict(XX) for f in modelList]).T
globalPred[0]
y[0]
err = (globalPred.mean(1)-y)**2
print(err.sum()/len(err))
for f in modelList:
print(str(f).split("(")[0])
err = (f.predict(XX)-y)**2
print(err.sum()/len(err))
for f in modelList:
print(str(f).split("(")[0])
print(f.score(XX,y))
svrMeta = SVR()
%%time
svrMeta = svrMeta.fit(globalPred,y)
err = (svrMeta.predict(globalPred)-y)**2
print(err.sum()/len(err))
in_dim = len(XX[0])
out_dim = 1
model = Sequential()
# Dense(128) is a fully-connected layer with 128 hidden units.
# In the first layer you must specify the expected input data shape:
# here, vectors of length in_dim.
model.add(Dense(128, input_shape=(in_dim,)))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(1, init='uniform'))
model.add(Activation('linear'))
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='mean_squared_error', optimizer=sgd)
rms = RMSprop()
model.compile(loss='mean_squared_error', optimizer=rms)
#model.fit(X_train, y_train, nb_epoch=20, batch_size=16)
#score = model.evaluate(X_test, y_test, batch_size=16)
prep = []
for i in y_train:
prep.append(min(i,20))
prep=np.array(prep)
mi,ma = prep.min(),prep.max()
fy = (prep-mi) / (ma-mi)
#my = fy.max()
#fy = fy/fy.max()
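# The capped target is min-max scaled to [0, 1] for training; predictions are
# mapped back to the original scale with pred * (ma - mi) + mi below.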
model.fit(np.array(X_train), fy, batch_size=10, nb_epoch=10, validation_split=0.1)
pred = model.predict(np.array(X_test))*(ma-mi)+mi  # invert the min-max scaling
err = (pred-y_test)**2
err.sum()/len(err)
r = random.randrange(len(X_train))
print("(Train) Prediction %0.4f, True: %0.4f"%(model.predict(np.array([X_train[r]]))[0][0]*(ma-mi)+mi,y_train[r]))
r = random.randrange(len(X_test))
print("(Test) Prediction %0.4f, True: %0.4f"%(model.predict(np.array([X_test[r]]))[0][0]*(ma-mi)+mi,y_test[r]))
%%time
#filename = "data/reduced_test_5000.csv"
filename = "data/test.csv"
test = pd.read_csv(filename)
test = test.set_index('Id')
features_columns = list([u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
def getX(raw):
selected_columns = list([ u'minutes_past',u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th'])
data = raw[selected_columns]
docX= []
for i in data.index.unique():
if isinstance(data.loc[i],pd.core.series.Series):
m = [data.loc[i].as_matrix()]
docX.append(m)
else:
m = data.loc[i].as_matrix()
docX.append(m)
X = np.array(docX)
return X
#%%time
#X=getX(test)
#tmp = []
#for i in X:
# tmp.append(len(i))
#tmp = np.array(tmp)
#sns.countplot(tmp,order=range(tmp.min(),tmp.max()+1))
#plt.title("Number of ID per number of observations\n(On test dataset)")
#plt.plot()
testFull = test.dropna()
%%time
X=getX(testFull) # 1min
#XX = [np.array(t).mean(0) for t in X] # 10s
XX=addFeatures(X)
pd.DataFrame(gbr.predict(XX)).describe()
predFull = zip(testFull.index.unique(),gbr.predict(XX))
testNan = test.drop(test[features_columns].dropna(how='all').index)
tmp = np.empty(len(testNan))
tmp.fill(0.445000) # 50th percentile of full Nan dataset
predNan = zip(testNan.index.unique(),tmp)
testLeft = test.drop(testNan.index.unique()).drop(testFull.index.unique())
tmp = np.empty(len(testLeft))
tmp.fill(1.27) # fallback constant for the remaining, partially filled sequences
predLeft = zip(testLeft.index.unique(),tmp)
len(testFull.index.unique())
len(testNan.index.unique())
len(testLeft.index.unique())
pred = predFull + predNan + predLeft
pred.sort(key=lambda x: x[0], reverse=False)
submission = pd.DataFrame(pred)
submission.columns = ["Id","Expected"]
submission.head()
submission.loc[submission['Expected']<0,'Expected'] = 0.445
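# Negative rainfall predictions are not physical; clip them to the same
# fallback value used above for the all-NaN sequences.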
submission.to_csv("submit4.csv",index=False)
filename = "data/sample_solution.csv"
sol = pd.read_csv(filename)
sol
ss = np.array(sol)
%%time
for a,b in predFull:
ss[a-1][1]=b
ss
sub = pd.DataFrame(pred)
sub.columns = ["Id","Expected"]
sub.Id = sub.Id.astype(int)
sub.head()
sub.to_csv("submit3.csv",index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 13,765,202 lines in train.csv
Step2: Per wikipedia, a value of more than 421 mm/h is considered "Extreme/large hail"
Step3: We regroup the data by ID
Step4: On fully filled dataset
Step5: Predictions
Step6:
Step7: max depth 24
Step8:
Step9:
Step10:
Step11:
Step12:
Step13:
Step14:
Step15: Here for legacy
Step16: Predict on testset
|
5,498
|
<ASSISTANT_TASK:>
Python Code:
from pprint import pprint
# I, Python am built from types, such as builtin types:
the_builtins = dir(__builtins__) # always here
pprint(the_builtins[-10:]) # no need to import
for the_string in ["list", "tuple", "dict", "int", "float"]:
if the_string in the_builtins:
print("Yes I am a native type: ", the_string)
assert type(eval(the_string)) == type # all types in this club
else:
print("No, I'm not native: ", the_string)
# usually up top
from string import ascii_lowercase as all_lowers
from random import shuffle
class P:
    """class Px is the more sophisticated version of this class"""
def __init__(self, p=None):
if not p:
original = all_lowers + ' '
scrambled = list(original)
shuffle(scrambled)
self.perm = dict(zip(original, scrambled))
else:
self.perm = p
def __invert__(self):
    """Reverse my perm, make a new me."""
reverse = dict(zip(self.perm.values(), self.perm.keys()))
return P(reverse) # <-- new P instance
def encrypt(self, s):
output = ""
for c in s:
output += self.perm[c]
return output
def decrypt(self, s):
rev = ~self # <-- new P instance
return rev.encrypt(s) # <-- symmetric key
p = P()
m = "i like python so much because it does everything" # plaintext message (lowercase letters and spaces only)
c = p.encrypt(m)
print(m) # plaintext
print(c) # ciphertext
d = p.decrypt(c)
print(d)
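# Added sanity check: decrypting what we just encrypted recovers the original
# message, since ~p builds the inverse permutation.
assert p.decrypt(p.encrypt(m)) == m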
import sqlite3 as sql
import os.path
import json
import time
from contextlib import contextmanager
PATH = "/Users/kurner/Documents/classroom_labs/session10"
DB1 = os.path.join(PATH, 'periodic_table.db')
def mod_date():
return time.mktime(time.gmtime()) # GMT time
@contextmanager
def Connector(db):
    try:
        db.conn = sql.connect(db.db_name)  # connection
        db.curs = db.conn.cursor()         # cursor
        yield db
    finally:
        # Always close the connection, whether or not the body raised.
        db.conn.close()
class elemsDB:
def __init__(self, db_name):
self.db_name = db_name
def seek(self, elem):
if self.conn:
if elem != "all":
query = ("SELECT * FROM Elements "
"WHERE elem_symbol = '{}'".format(elem))
self.curs.execute(query)
result = self.curs.fetchone()
if result:
return json.dumps(list(result))
else:
query = "SELECT * FROM Elements ORDER BY elem_protons"
self.curs.execute(query)
result={}
for row in self.curs.fetchall():
result[row[1]] = list(row)
return json.dumps(result)
return "NOT FOUND"
output = ""
with Connector(elemsDB(DB1)) as dbx:
output = dbx.seek("C")
print(output)
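# Hedged sketch (not part of the original notebook): a minimal Flask route that
# would serve the same lookup over GET requests such as /api/elements?elem=H.
# The app object and route path are illustrative assumptions.
from flask import Flask, request
app = Flask(__name__)
@app.route('/api/elements')
def elements_api():
    elem = request.args.get('elem', 'all')
    with Connector(elemsDB(DB1)) as dbx:
        return dbx.seek(elem)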
import requests
data = {}
data["protons"]=100
data["symbol"]="Kr"
data["long_name"]="Kirbium"
data["mass"]=300
data["series"]="Dunno"
data["secret"]="DADA" # <--- primitive authentication
the_url = 'http://localhost:5000/api/elements'
r = requests.post(the_url, data=data)
print(r.status_code)
print(r.content)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's check our understanding that the native types -- the ones we count on to build more complex types -- live in builtins
Step4: And now for something completely different, let's define a class that does substitution based on a permutation of lower-case ascii letters plus space. Such a type is given more substantial implementation in the form of our px_class.py, which allows permutations to multiply, giving more permutations.
Step5: In the code below, we use a context manager to connect and disconnect from a SQLite database. The context manager is developed from a simple generator with precisely one yield statement, using the @contextmanager decorator.
Step6: At this point, we're able to seek a specific row from the Elements table, or request all of them. In a Flask web application, the controlling argument might come from a GET request, i.e. a URL such as /api/elements?elem=H
Step7: To be continued...
|
5,499
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import statsmodels.api as sm
from scipy import stats
from matplotlib import pyplot as plt
plt.rc("figure", figsize=(16,8))
plt.rc("font", size=14)
print(sm.datasets.star98.NOTE)
data = sm.datasets.star98.load(as_pandas=False)
data.exog = sm.add_constant(data.exog, prepend=False)
print(data.endog[:5,:])
print(data.exog[:2,:])
glm_binom = sm.GLM(data.endog, data.exog, family=sm.families.Binomial())
res = glm_binom.fit()
print(res.summary())
print('Total number of trials:', data.endog[0].sum())
print('Parameters: ', res.params)
print('T-values: ', res.tvalues)
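# First differences: hold every covariate at its sample mean except LOWINC,
# which is set first to its 25th and then its 75th percentile; the change in
# predicted success probability is the interquartile first difference.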
means = data.exog.mean(axis=0)
means25 = means.copy()
means25[0] = stats.scoreatpercentile(data.exog[:,0], 25)
means75 = means.copy()
means75[0] = lowinc_75per = stats.scoreatpercentile(data.exog[:,0], 75)
resp_25 = res.predict(means25)
resp_75 = res.predict(means75)
diff = resp_75 - resp_25
print("%2.4f%%" % (diff*100))
nobs = res.nobs
y = data.endog[:,0]/data.endog.sum(1)
yhat = res.mu
from statsmodels.graphics.api import abline_plot
fig, ax = plt.subplots()
ax.scatter(yhat, y)
line_fit = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()
abline_plot(model_results=line_fit, ax=ax)
ax.set_title('Model Fit Plot')
ax.set_ylabel('Observed values')
ax.set_xlabel('Fitted values');
fig, ax = plt.subplots()
ax.scatter(yhat, res.resid_pearson)
ax.hlines(0, 0, 1)
ax.set_xlim(0, 1)
ax.set_title('Residual Dependence Plot')
ax.set_ylabel('Pearson Residuals')
ax.set_xlabel('Fitted values')
from scipy import stats
fig, ax = plt.subplots()
resid = res.resid_deviance.copy()
resid_std = stats.zscore(resid)
ax.hist(resid_std, bins=25)
ax.set_title('Histogram of standardized deviance residuals');
from statsmodels import graphics
graphics.gofplots.qqplot(resid, line='r')
print(sm.datasets.scotland.DESCRLONG)
data2 = sm.datasets.scotland.load()
data2.exog = sm.add_constant(data2.exog, prepend=False)
print(data2.exog[:5,:])
print(data2.endog[:5])
glm_gamma = sm.GLM(data2.endog, data2.exog, family=sm.families.Gamma(sm.families.links.log()))
glm_results = glm_gamma.fit()
print(glm_results.summary())
nobs2 = 100
x = np.arange(nobs2)
np.random.seed(54321)
X = np.column_stack((x,x**2))
X = sm.add_constant(X, prepend=False)
lny = np.exp(-(.03*x + .0001*x**2 - 1.0)) + .001 * np.random.rand(nobs2)
gauss_log = sm.GLM(lny, X, family=sm.families.Gaussian(sm.families.links.log()))
gauss_log_results = gauss_log.fit()
print(gauss_log_results.summary())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GLM
Step2: Load the data and add a constant to the exogenous (independent) variables
Step3: The dependent variable is N by 2 (Success
Step4: The independent variables include all the other variables described above, as
Step5: Fit and summary
Step6: Quantities of interest
Step7: First differences
Step8: The interquartile first difference for the percentage of low income households in a school district is
Step9: Plots
Step10: Plot yhat vs y
Step11: Plot yhat vs. Pearson residuals
Step12: Histogram of standardized deviance residuals
Step13: QQ Plot of Deviance Residuals
Step14: GLM
Step15: Load the data and add a constant to the exogenous variables
Step16: Model Fit and summary
Step17: GLM
Step18: Fit and summary (artificial data)
|