| text_prompt | code_prompt |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is not! Let's try another example.
Step2: This time the summation seems correct. That is the same as 2/3 = 0.6666...667
Step3: Small numbers
|
<ASSISTANT_TASK:>
Python Code:
a = 0
dx = 10**-9
for i in range(10**9):
a += dx
print(a)
a = 0
dx = 2**-30
for i in range(2**30):
a += dx
print(a)
a = 2*10**-16
a_ = (1+10**-16)-(1-10**-16)
(a_-a)/a
a = 2*10**-16
a_ = (1-1+10**-16+10**-16)
(a_-a)/a
s = 1
for i in range(10**7):
s+=10**-16
print(s)
s = 0
for i in range(10**7):
s+=10**-16
s+=1
print(s)
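# An alternative when exact accumulation matters: math.fsum tracks partial sums
# exactly, so it avoids the rounding error that plain summation shows above.
import math
print(sum(0.1 for _ in range(10)))        # 0.9999999999999999 with naive summation
print(math.fsum(0.1 for _ in range(10)))  # exactly 1.0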
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Find the square of the sum of the first 100 natural numbers
Step2: Find and print the difference
Step3: Success!
|
<ASSISTANT_TASK:>
Python Code:
sum_of_squares = sum([i ** 2 for i in range(1,101)])
square_of_sum = (sum([i for i in range(1,101)])) ** 2
difference = square_of_sum - sum_of_squares
print(difference)
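# Cross-check against the closed-form formulas for the first n natural numbers:
# sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6.
n = 100
assert (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6 == difference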
# This cell will be used for grading, leave it at the end of the notebook.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Before building any recommendation engines, we'll have to get the data into a useful form. Do this by first splitting the data into testing and training sets, and then by constructing two new dataframes whose columns are each unique movie and rows are each unique user, filling in 0 for missing values.
Step7: Now split the data into a training and test set, using a ratio 80/20 for train/test.
Step13: SVD
Step14: Overall RMSE of about 0.98
Step15: 7 is the optimal value of k in this case. Note that no cross-validation was performed!
|
<ASSISTANT_TASK:>
Python Code:
# Importing the data
import pandas as pd
import numpy as np
header = ['user_id', 'item_id', 'rating', 'timestamp']
data_movie_raw = pd.read_csv('../data/ml-100k/u.data', sep='\t', names=header)
data_movie_raw.head()
from sklearn.model_selection import train_test_split
# First split into train and test sets
data_train_raw, data_test_raw = train_test_split(data_movie_raw, train_size = 0.8)
# Turning to pivot tables
data_train = data_train_raw.pivot_table(index = 'user_id', columns = 'item_id', values = 'rating').fillna(0)
data_test = data_test_raw.pivot_table(index = 'user_id', columns = 'item_id', values = 'rating').fillna(0)
# Print the first few rows
data_train.head()
# Libraries
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
class cos_engine:
def __init__(self, data_all):
Constructor for cos_engine class
Args:
data_all: Raw dataset containing all movies to build
a list of movies already seen by each user.
# Create copy of data
self.data_all = data_all.copy()
        # Now build a list of movies each user has already seen
self.seen = []
for user in data_all.user_id.unique():
cur_seen = {}
cur_seen["user"] = user
cur_seen["seen"] = self.data_all[data_all.user_id == user].item_id
self.seen.append(cur_seen)
def fit(self, data_train):
Performs cosine similarity on a sparse matrix data_train
Args:
data_train: A pandas data frame data to estimate cosine similarity
# Create a copy of the dataframe
self.data_train = data_train.copy()
# Save the indices and column names
self.users = self.data_train.index
self.items = self.data_train.columns
# Compute mean vectors
self.user_means = self.data_train.replace(0, np.nan).mean(axis = 1)
self.item_means = self.data_train.T.replace(0, np.nan).mean(axis = 1)
# Get similarity matrices and compute sums for normalization
# For non adjusted cosine similarity, neglect subtracting the means.
self.data_train_adj = (self.data_train.replace(0, np.nan).T - self.user_means).fillna(0).T
self.user_cos = cosine_similarity(self.data_train_adj)
self.item_cos = cosine_similarity(self.data_train_adj.T)
self.user_cos_sum = np.abs(self.user_cos).sum(axis = 1)
self.item_cos_sum = np.abs(self.item_cos).sum(axis = 1)
self.user_cos_sum = self.user_cos_sum.reshape(self.user_cos_sum.shape[0], 1)
self.item_cos_sum = self.item_cos_sum.reshape(self.item_cos_sum.shape[0], 1)
def predict(self, method = 'user'):
Predicts using Cosine Similarity
Args:
method: A string indicating what method to use, user or item.
Default user.
Returns:
A pandas dataframe containing the prediction values
# Store prediction locally and turn to dataframe
if method == 'user':
self.pred = self.user_means[:, np.newaxis] + ((self.user_cos @ self.data_train_adj) / self.user_cos_sum)
            self.pred = pd.DataFrame(self.pred, index = self.data_train.index, columns = self.data_train.columns)
elif method == 'item':
self.pred = self.item_means[:, np.newaxis] + ((self.data_train @ self.item_cos) / self.item_cos_sum.T).T
            self.pred = pd.DataFrame(self.pred, columns = self.data_train.index.values, index = self.data_train.columns)
return(self.pred)
def test(self, data_test, root = False):
Tests fit given test data in data_test
Args:
data_test: A pandas dataframe containing test data
root: A boolean indicating whether to return the RMSE.
Default False
Returns:
The Mean Squared Error of the fit if root = False, the Root Mean\
Squared Error otherwise.
# Build a list of common indices (users) in the train and test set
row_idx = list(set(self.pred.index) & set(data_test.index))
# Prime the variables for loop
err = [] # To hold the Sum of Squared Errors
        N = 0 # To count predictions for MSE calculation
for row in row_idx:
# Get the rows
test_row = data_test.loc[row, :]
pred_row = self.pred.loc[row, :]
# Get indices of nonzero elements in the test set
idx = test_row.index[test_row.nonzero()[0]]
# Get only common movies
temp_test = test_row[idx]
temp_pred = pred_row[idx]
# Compute error and count
temp_err = ((temp_pred - temp_test)**2).sum()
N = N + len(idx)
err.append(temp_err)
mse = np.sum(err) / N
# Switch for RMSE
if root:
err = np.sqrt(mse)
else:
err = mse
return(err)
def recommend(self, user, num_recs):
        Recommends movies for a given user
        Args:
            user: The user id to generate recommendations for
            num_recs: The number of recommendations to return
        Returns:
            An index of the num_recs highest-predicted, not-yet-seen item ids
# Get list of already seen movies for this user
        idx_seen = next(item for item in self.seen if item["user"] == user)["seen"]
# Remove already seen movies and recommend
rec = self.pred.loc[user, :].drop(idx_seen).nlargest(num_recs)
return(rec.index)
# Testing
cos_en = cos_engine(data_movie_raw)
cos_en.fit(data_train)
# Predict using user similarity
pred1 = cos_en.predict(method = 'user')
err = cos_en.test(data_test, root = True)
rec1 = cos_en.recommend(1, 5)
print("RMSE:", err)
print("Reccomendations for user 1:", rec1.values)
# And now with item
pred2 = cos_en.predict(method = 'item')
err = cos_en.test(data_test, root = True)
rec2 = cos_en.recommend(1, 5)
print("RMSE:", err)
print("Reccomendations for item 1:", rec2.values)
# Libraries
import pandas as pd
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
class svd_engine:
def __init__(self, data_all, k = 6):
        Constructor for svd_engine class
        Args:
            data_all: Raw dataset used to build a list of movies already seen by each user
            k: The number of latent variables to fit
self.k = k
# Create copy of data
self.data_all = data_all.copy()
        # Now build a list of movies each user has already seen
self.seen = []
for user in data_all.user_id.unique():
cur_seen = {}
cur_seen["user"] = user
cur_seen["seen"] = self.data_all[data_all.user_id == user].item_id
self.seen.append(cur_seen)
def fit(self, data_train):
Performs SVD on a sparse matrix data_train
Args:
data_train: A pandas data frame data to estimate SVD
Returns:
            Matrices u, s, and vt of the SVD
# Save local copy of data
self.data_train = data_train.copy()
# Compute adjusted matrix
self.user_means = self.data_train.replace(0, np.nan).mean(axis = 1)
self.item_means = self.data_train.T.replace(0, np.nan).mean(axis = 1)
self.data_train_adj = (self.data_train.replace(0, np.nan).T - self.user_means).fillna(0).T
# Save the indices and column names
self.users = data_train.index
self.items = data_train.columns
# Train the model
self.u, self.s, self.vt = svds(self.data_train_adj, k = self.k)
return(self.u, np.diag(self.s), self.vt)
def predict(self):
Predicts using SVD
Returns:
A pandas dataframe containing the prediction values
# Store prediction locally and turn to dataframe, adding the mean back
self.pred = pd.DataFrame(self.u @ np.diag(self.s) @ self.vt,
index = self.users,
columns = self.items)
self.pred = self.user_means[:, np.newaxis] + self.pred
return(self.pred)
def test(self, data_test, root = False):
Tests fit given test data in data_test
Args:
data_test: A pandas dataframe containing test data
root: A boolean indicating whether to return the RMSE.
Default False
Returns:
The Mean Squared Error of the fit if root = False, the Root Mean\
Squared Error otherwise.
# Build a list of common indices (users) in the train and test set
row_idx = list(set(self.pred.index) & set(data_test.index))
# Prime the variables for loop
err = [] # To hold the Sum of Squared Errors
N = 0 # To count predictions for MSE calculation
for row in row_idx:
# Get the rows
test_row = data_test.loc[row, :]
pred_row = self.pred.loc[row, :]
# Get indices of nonzero elements in the test set
idx = test_row.index[test_row.nonzero()[0]]
# Get only common movies
temp_test = test_row[idx]
temp_pred = pred_row[idx]
# Compute error and count
temp_err = ((temp_pred - temp_test)**2).sum()
N = N + len(idx)
err.append(temp_err)
mse = np.sum(err) / N
# Switch for RMSE
if root:
err = np.sqrt(mse)
else:
err = mse
return(err)
def recommend(self, user, num_recs):
        Recommends movies for a given user
        Args:
            user: The user id to generate recommendations for
            num_recs: The number of recommendations to return
        Returns:
            An index of the num_recs highest-predicted, not-yet-seen item ids
# Get list of already seen movies for this user
        idx_seen = next(item for item in self.seen if item["user"] == user)["seen"]
# Remove already seen movies and recommend
rec = self.pred.loc[user, :].drop(idx_seen).nlargest(num_recs)
return(rec.index)
# Testing
svd_en = svd_engine(data_movie_raw, k = 20)
svd_en.fit(data_train)
svd_en.predict()
err = svd_en.test(data_test, root = True)
rec = svd_en.recommend(1, 5)
print("RMSE:", err)
print("Reccomendations for user 1:", rec.values)
# Parameter tuning
import matplotlib.pyplot as plt
err = []
for cur_k in range(1, 50):
svd_en = svd_engine(data_movie_raw, k = cur_k)
svd_en.fit(data_train)
svd_en.predict()
    err.append(svd_en.test(data_test, root = True))
plt.plot(range(1, 50), err)
plt.title('RMSE versus k')
plt.xlabel('k')
plt.ylabel('RMSE')
plt.show()
err.index(min(err))
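# err[i] corresponds to k = i + 1 (the loop above starts at k = 1), so the best k
# is one more than the position of the minimum error.
print('best k:', err.index(min(err)) + 1)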
# Build the engine
svd_en = svd_engine(data_movie_raw, k = 7)
svd_en.fit(data_train)
svd_en.predict()
# Now make recommendations
recs = []
for user in data_movie_raw.user_id.unique():
temp_rec = svd_en.recommend(user, 5)
recs.append(temp_rec)
recs[0]
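# To make the recommendations readable, the item ids can be mapped back to movie
# titles. This assumes the MovieLens u.item file sits next to u.data and follows
# its documented layout (pipe-separated, movie id and title in the first two columns).
movies = pd.read_csv('../data/ml-100k/u.item', sep='|', encoding='latin-1',
                     header=None, usecols=[0, 1], names=['item_id', 'title'])
print(movies.set_index('item_id').loc[recs[0], 'title'].values)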
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 5.1.2 Feature transformation
Step2: 5.1.3 Principal component analysis with scikit-learn
Step3: 5.2 Supervised data compression via linear discriminant analysis
Step4: 5.2.2 Selecting linear discriminants for the new feature subspace
Step5: 5.3.2 Projecting samples onto the new feature space
Step6: 5.2.4 LDA with scikit-learn
Step8: 5.3 Using kernel principal component analysis for nonlinear mappings
Step9: Example 1
Step10: Example 2
Step11: 5.3.3 Projecting new data points
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import display
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
import pandas as pd
# http://archive.ics.uci.edu/ml/datasets/Wine
df_wine = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
# 1. Standardize the d-dimensional dataset
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Extract the features and the class labels separately
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# Use 30% of the data as the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
import numpy as np
# 2. Construct the covariance matrix
cov_mat = np.cov(X_train_std.T)
# 3. Compute the eigenvalues and eigenvectors
# np.linalg.eig performs the eigendecomposition
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
eigen_vals
# Sum the eigenvalues
tot = sum(eigen_vals)
# Compute the explained variance ratios
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
display("var_exp:", var_exp)
# Cumulative sum of the explained variance ratios
cum_var_exp = np.cumsum(var_exp)
display("cum_var_exp:", cum_var_exp)
import matplotlib.pyplot as plt
# Bar chart of the individual explained variance ratios
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance')
# Step plot of the cumulative explained variance
plt.step(range(1, 14), cum_var_exp, where='mid', label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show()
# Build a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:,i]) for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from largest to smallest eigenvalue
eigen_pairs.sort(reverse=True)
# 4. Select the k eigenvectors corresponding to the k largest eigenvalues (here k = 2)
# 5. Build the projection matrix W from the top k eigenvectors
w = np.hstack((eigen_pairs[0][1][:, np.newaxis], eigen_pairs[1][1][:, np.newaxis]))
display("Matrix W:", w)
# x' = xW
display(X_train_std[0].dot(w))
# 6. Transform the d-dimensional input dataset X with the projection matrix W to obtain the new k-dimensional feature subspace
# X' = XW
X_train_pca = X_train_std.dot(w)
# Plot the transformed data as a two-dimensional scatter plot
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
# Plot each class label with its own color and marker
for label, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train==label, 0], X_train_pca[y_train==label, 1], c=c, label=label, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
    # Prepare the markers and the color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
    # Plot the decision regions
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # Generate the grid points
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
    # Flatten each feature to a 1-D array and run the prediction
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    # Reshape the predictions back onto the grid
Z = Z.reshape(xx1.shape)
    # Draw filled contours over the grid points
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    # Set the axis limits
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
    # Plot the samples for each class
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y==cl, 0], y=X[y==cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
# Create a PCA instance with the desired number of principal components
pca = PCA(n_components=2)
# Create a logistic regression instance
lr = LogisticRegression()
# Fit PCA on the training data and transform both the training and test data
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
# Fit the logistic regression model on the training data
lr.fit(X_train_pca, y_train)
# Plot the decision boundary (training data)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
# Plot the decision boundary (test data)
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
# Compute the explained variance ratios
pca.explained_variance_ratio_
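# The cumulative sum shows how many components are needed to retain, say, 95% of
# the total variance.
print(np.cumsum(pca.explained_variance_ratio_))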
# 1. Standardize the d-dimensional dataset (d = number of features)
# X_train_std and X_test_std have already been created above
# 2. Compute the d-dimensional mean vector for each class
np.set_printoptions(precision=4)
mean_vecs = []
for label in range(1, 4):
mean_vecs.append(np.mean(X_train_std[y_train==label], axis=0))
print('MV {}:, {}\n'.format(label, mean_vecs[label - 1]))
# 3. Construct the between-class scatter matrix SB and the within-class scatter matrix SW
d = 13 # number of features
# Within-class scatter matrix SW
S_W = np.zeros((d, d)) # 13 x 13 matrix of all zeros
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.zeros((d, d))
for row in X_train_std[y_train == label]:
row, mv = row.reshape(d, 1), mv.reshape(d, 1)
class_scatter += (row - mv).dot((row - mv).T)
S_W += class_scatter
print('Within-class scatter matrix: {}x{}'.format(S_W.shape[0], S_W.shape[1]))
# The class labels are not uniformly distributed
print('Class label distribution: {}'.format(np.bincount(y_train)[1:]))
d = 13
# Within-class scatter matrix SW
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.cov(X_train_std[y_train == label].T)
S_W += class_scatter
print('Scaled within-class scatter matrix: {}x{}'.format(S_W.shape[0], S_W.shape[1]))
# Between-class scatter matrix SB
mean_overall = np.mean(X_train_std, axis=0)
d = 13
S_B = np.zeros((d, d))
for i, mean_vec in enumerate(mean_vecs):
n = X_train[y_train==i + 1, :].shape[0]
mean_vec = mean_vec.reshape(d, 1)
mean_overall = mean_overall.reshape(d, 1)
S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)
print('Between-class scatter matrix: {}x{}'.format(S_B.shape[0], S_B.shape[1]))
X_train[y_train==2, :].shape[0]
# 4. Compute the eigenvectors and corresponding eigenvalues of the matrix SW^-1 SB
# inv computes the inverse, dot the matrix product, and eig the eigendecomposition
eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
# Build a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from largest to smallest eigenvalue
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
for eigen_val in eigen_pairs:
print(eigen_val[0])
# Sum the real parts of the eigenvalues
tot = sum(eigen_vals.real)
# Compute the 'discriminability' ratios and their cumulative sum
discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)]
display("discr:", discr)
cum_discr = np.cumsum(discr)
display("cum_discr:", cum_discr)
# Bar chart of the individual "discriminability" ratios
plt.bar(range(1, 14), discr, alpha=0.5, align='center', label='individual "discriminability"')
# Step plot of the cumulative "discriminability"
plt.step(range(1, 14), cum_discr, where='mid', label='cumulative "discriminability"')
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.show()
# 6. Project the samples onto the new feature subspace using the transformation matrix W
# Build the transformation matrix from the top two eigenvectors
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real, eigen_pairs[1][1][:, np.newaxis].real))
display("Matrix W:", w)
# Multiply the standardized training data by the transformation matrix
X_train_lda = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
# Plot each class label with its own color and marker
for label, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_lda[y_train==label, 0] * -1, X_train_lda[y_train==label, 1] * -1, c=c, label=label, marker=m)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
if Version(sklearn_version) < '0.18':
from sklearn.lda import LDA
else:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# Create an LDA instance with the desired number of components
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train_lda, y_train)
# Plot the decision boundary (training data)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(X, gamma, n_components):
    Implementation of an RBF kernel PCA
    Parameters
    ----------
    X: [NumPy ndarray], shape = [n_samples, n_features]
    gamma: float
        Tuning parameter of the RBF kernel
    n_components: int
        Number of principal components to return
    Returns
    ------
    X_pc: [NumPy ndarray], shape = [n_samples, n_components]
        Projected dataset
    # Compute the pairwise squared Euclidean distances in the M x N dimensional dataset
sq_dists = pdist(X, 'sqeuclidean')
    # Convert the pairwise distances into a square matrix
mat_sq_dists = squareform(sq_dists)
    # Compute the symmetric kernel matrix
K = exp(-gamma * mat_sq_dists)
    # Center the kernel matrix
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # Obtain the eigenvalues and eigenvectors of the centered kernel matrix
    # scipy.linalg.eigh returns them sorted in ascending order
eigvals, eigvecs = eigh(K)
    # Collect the top k eigenvectors (the projected samples)
X_pc = np.column_stack((eigvecs[:, -i] for i in range(1, n_components + 1)))
return X_pc
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt
# Create a dataset of two half-moon shapes
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.show()
# Try standard PCA first
from sklearn.decomposition import PCA
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Scatter plot in the second subplot
ax[1].scatter(X_spca[y==0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
from matplotlib.ticker import FormatStrFormatter
# Use the kernel PCA function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Scatter plot in the second subplot
ax[1].scatter(X_kpca[y==0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
from sklearn.datasets import make_circles
import matplotlib.pyplot as plt
# Create and plot the concentric-circles dataset
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.show()
# Try standard PCA first
from sklearn.decomposition import PCA
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Scatter plot in the second subplot
ax[1].scatter(X_spca[y==0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Use the kernel PCA function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Scatter plot in the second subplot
ax[1].scatter(X_kpca[y==0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
from sklearn.decomposition import KernelPCA
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y==0, 0], X_skernpca[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y==1, 0], X_skernpca[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.show()
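# A fitted KernelPCA object can also project previously unseen points via its
# transform() method; here one of the existing points is reused as a stand-in example.
x_new = X[25].reshape(1, -1)
print(scikit_kpca.transform(x_new))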
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To solve this, we now create two separate problems and define an error measure on each.
Step2: Next, we combine the two error functions in a SumOfErrors.
Step3: Now that we have an error function, we proceed as usual
|
<ASSISTANT_TASK:>
Python Code:
import pints
import pints.toy as toy
import numpy as np
import matplotlib.pyplot as plt
# Create two models with a different initial population size
model_1 = toy.LogisticModel(initial_population_size=15)
model_2 = toy.LogisticModel(initial_population_size=2)
# Both models share a single set of parameters: it's the same system!
real_parameters = model_1.suggested_parameters()
# The two experiments were sampled at slightly different times
times_1 = model_1.suggested_times()[::3]
times_2 = model_2.suggested_times()[3::5]
experiment_1 = model_1.simulate(real_parameters, times_1)
experiment_2 = model_2.simulate(real_parameters, times_2)
# And both experiments had different noise levels
sigma_1 = 5
experiment_1 += np.random.normal(0, sigma_1, size=times_1.shape)
sigma_2 = 3
experiment_2 += np.random.normal(0, sigma_2, size=times_2.shape)
# Let's have a look!
plt.figure()
plt.plot(times_1, experiment_1, 'xb')
plt.plot(times_2, experiment_2, 'xg')
plt.show()
problem_1 = pints.SingleOutputProblem(model_1, times_1, experiment_1)
problem_2 = pints.SingleOutputProblem(model_2, times_2, experiment_2)
error_1 = pints.SumOfSquaresError(problem_1)
error_2 = pints.SumOfSquaresError(problem_2)
error = pints.SumOfErrors((error_1, error_2))
opt = pints.Optimisation(error, [1, 40])
opt.set_log_to_screen(False)
x, f = opt.run()
print(x)
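# Compare the fitted parameters with the true values used to generate the data.
print('estimated:', x)
print('true:     ', real_parameters)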
# Now inspect the results
plt.figure()
plt.plot(times_1, experiment_1, 'xb')
plt.plot(times_2, experiment_2, 'xg')
plt.plot(times_1, problem_1.evaluate(x), 'b')
plt.plot(times_2, problem_2.evaluate(x), 'g')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, to make the code run on all of our engines (and not just on one), the following cells have to start with the parallel magic command %%px
Step3: Now, that we have MPI running, and mpi4py recognizing the nodes and their ranks, we can continue with the predator prey exercise, that we know from the first tutorial.
Step5: Then import the experiment_handling class from pymofa and define a run function
Step6: Specify the necessary parameters, generate their combinations and feed them to an experiment handle
Step7: And finally run the model - now in parallel
|
<ASSISTANT_TASK:>
Python Code:
from ipyparallel import Client
import os
c = Client()
view = c[:]
print(c.ids)
%%px
def find(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
path = find('02_LocalParallelization.ipynb', '/home/')
print(path)
os.chdir(path)
%%px
from mpi4py import MPI
com = MPI.COMM_WORLD
print(com.Get_rank())
%%px
import numpy as np
import matplotlib.pyplot as plt
def predprey_model(prey_birth_rate=0.1, prey_mortality=0.1,
predator_efficiency=0.1, predator_death_rate=0.01,
initial_prey=1., initial_predators=1.,
time_length=1000):
    Discrete predator-prey model.
A = -1 * np.ones(time_length)
B = -1 * np.ones(time_length)
A[0] = initial_prey
B[0] = initial_predators
for t in range(1, time_length):
A[t] = A[t-1] + prey_birth_rate * A[t-1] - prey_mortality * B[t-1]*A[t-1]
B[t] = B[t-1] + predator_efficiency * B[t-1]*A[t-1] - predator_death_rate * B[t-1] +\
0.02 * (0.5 - np.random.rand())
return A, B
#preys, predators = predprey_model()
#plt.plot(preys, label="preys")
#plt.plot(predators, label="predators")
#plt.legend()
#plt.show()
%%px
# imports
from pymofa.experiment_handling import experiment_handling as eh
import itertools as it
import pandas as pd
# import cPickle
# Defining the experiment execution function
# it gets the parameters you want to investigate, plus `filename` as the last parameter
def RUN_FUNC(prey_birth_rate=0.1, prey_mortality=0.1,
predator_efficiency=0.1, predator_death_rate=0.01,
initial_prey=1., initial_predators=1.,
time_length=1000, filename='./'):
Insightful docstring.
print(prey_birth_rate, prey_mortality,
predator_efficiency, predator_death_rate,
initial_prey, initial_predators,
time_length)
# one could also do more complicated stuff here, e.g. drawing something from a random distribution
# running the model
# TO DO: there seems to be a problem passing arguments to function
#preys, predators = predprey_model(prey_birth_rate, prey_mortality,
# predator_efficiency, predator_death_rate,
# initial_prey, initial_predators,
# time_length)
preys, predators = predprey_model(
)
print(preys)
# preparing the data
res = pd.DataFrame({"preys": np.array(preys),
"predators": np.array(predators)})
# Save Result
res.to_pickle(filename)
# determine exit status (if something went wrong)
    # if exit status > 0 == run passed
# if exit status < 0 == Run Failed
exit_status = 1
# RUN_FUNC needs to return exit_status
return exit_status
%%px
# Path where to Store the simulated Data
SAVE_PATH_RAW = "./dummy/pymofatutorial"
# Parameter combinations to investigate
prey_birth_rate = [0.1]
predator_death_rate = [0.1]
initial_pop = [1.]
PARAM_COMBS = list(it.product(prey_birth_rate, predator_death_rate, initial_pop))
# Sample Size
SAMPLE_SIZE = 5
# INDEX
INDEX = {0: 'prey_birth_rate', 1: 'predator_death_rate', 2: 'initial_prey'}
# initiate handle instance with experiment variables
handle = eh(SAMPLE_SIZE, PARAM_COMBS, INDEX, SAVE_PATH_RAW)
%%px
# Compute the experiment's raw data
handle.compute(RUN_FUNC)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: imports for Python, Pandas
Step2: JSON example, with string
Step3: JSON example, with file
Step4: JSON exercise
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import json
from pandas.io.json import json_normalize
# define json string
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {'governor': 'Rick Scott'},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {'governor': 'John Kasich'},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
# use normalization to create tables from nested element
json_normalize(data, 'counties')
# further populate tables created from nested element
json_normalize(data, 'counties', ['state', 'shortname', ['info', 'governor']])
# load json as string
json.load((open('data/world_bank_projects_less.json')))
# load as Pandas dataframe
sample_json_df = pd.read_json('data/world_bank_projects_less.json')
sample_json_df
df = pd.read_json('data/world_bank_projects.json')
df.groupby('countryshortname').size().sort_values(ascending=False).head(10)
data = json.load((open('data/world_bank_projects.json')))
project_themes = json_normalize(data, 'mjtheme_namecode')
project_themes.groupby(['code', 'name']).size().sort_values(ascending=False).head(10)
p = project_themes.copy()
c = p[p.name != ''].groupby('code').first().squeeze()
p['name'] = [c[x] for x in p.code]
p
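# With the missing names filled in, recount the top 10 major project themes.
p.groupby(['code', 'name']).size().sort_values(ascending=False).head(10)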
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step4: Linear Regression with Gradient Descent code
Step5: Run Gradient Descent on training data
Step6: Plot trained line on data
Step7: <br>
Step8: Upload .csv file to Kaggle.com
Step9: <br>
Step10: <br>
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn import linear_model
import matplotlib.pyplot as plt
import matplotlib as mpl
# read house_train.csv data in pandas dataframe df_train using pandas read_csv function
df_train = pd.read_csv('datasets/house_price/train.csv', encoding='utf-8')
# check data by printing first few rows
df_train.head()
# check columns in dataset
df_train.columns
# check correlation matrix, darker means more correlation
corrmat = df_train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
# SalePrice correlation matrix with top k variables
k = 10 #number of variables for heatmap
cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(df_train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
#scatterplot with some important variables
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'YearBuilt']
sns.set()
sns.pairplot(df_train[cols], size = 2.5)
plt.show();
# Load X and y variables from pandas dataframe df_train
cols = ['GrLivArea']
X_train = np.array(df_train[cols])
y_train = np.array(df_train[["SalePrice"]])
# Get m = number of samples and n = number of features
m = X_train.shape[0]
n = X_train.shape[1]
# append a column of 1's to X for theta_0
X_train = np.insert(X_train,0,1,axis=1)
iterations = 1500
alpha = 0.000000001 # change it and find what happens
def h(X, theta): #Linear hypothesis function
hx = np.dot(X,theta)
return hx
def computeCost(theta,X,y): #Cost function
theta is an n- dimensional vector, X is matrix with n- columns and m- rows
y is a matrix with m- rows and 1 column
#note to self: *.shape is (rows, columns)
return float((1./(2*m)) * np.dot((h(X,theta)-y).T,(h(X,theta)-y)))
#Actual gradient descent minimizing routine
def gradientDescent(X,y, theta_start = np.zeros((n+1,1))):
theta_start is an n- dimensional vector of initial theta guess
X is input variable matrix with n- columns and m- rows. y is a matrix with m- rows and 1 column.
theta = theta_start
j_history = [] #Used to plot cost as function of iteration
theta_history = [] #Used to visualize the minimization path later on
for meaninglessvariable in range(iterations):
tmptheta = theta
# append for plotting
j_history.append(computeCost(theta,X,y))
theta_history.append(list(theta[:,0]))
#Simultaneously updating theta values
for j in range(len(tmptheta)):
tmptheta[j] = theta[j] - (alpha/m)*np.sum((h(X,theta) - y)*np.array(X[:,j]).reshape(m,1))
theta = tmptheta
return theta, theta_history, j_history
#Actually run gradient descent to get the best-fit theta values
initial_theta = np.zeros((n+1,1));
theta, theta_history, j_history = gradientDescent(X_train,y_train,initial_theta)
plt.plot(j_history)
plt.title("Convergence of Cost Function")
plt.xlabel("Iteration number")
plt.ylabel("Cost function")
plt.show()
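# Inspect the fitted parameters: theta[0] is the intercept, theta[1] the slope for GrLivArea.
print(theta.ravel())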
# predict output for training data
hx_train= h(X_train, theta)
# plot it
plt.scatter(X_train[:,1],y_train)
plt.plot(X_train[:,1],hx_train[:,0], color='red')
plt.show()
# read data in pandas frame df_test and check first few rows
# write code here
df_test.head()
# check statistics of test data, make sure no data is missing.
print(df_test.shape)
df_test[cols].describe()
# Get X_test, no target variable (SalePrice) provided in test data. It is what we need to predict.
X_test = np.array(df_test[cols])
#Insert the usual column of 1's into the "X" matrix
X_test = np.insert(X_test,0,1,axis=1)
# predict test data labels i.e. y_test
predict = h(X_test, theta)
# save prediction as .csv file
pd.DataFrame({'Id': df_test.Id, 'SalePrice': predict[:,0]}).to_csv("predict1.csv", index=False)
from IPython.display import Image
Image(filename='images/asgn_01.png', width=500)
# import scikit-learn linear model
from sklearn import linear_model
# get X and y
# write code here
# Create linear regression object
# write code here check link above for example
# Train the model using the training sets. Use fit(X,y) command
# write code here
# The coefficients
print('Intercept: \n', regr.intercept_)
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(X_train) - y_train) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(X_train, y_train))
# read test X without 1's
# write code here
# predict output for test data. Use predict(X) command.
predict2 = # write code here
# remove negative sales by replacing them with zeros
predict2[predict2<0] = 0
# save prediction as predict2.csv file
# write code here
# define columns ['OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'YearBuilt']
# write code here
# check features range and statistics. Training dataset looks fine as all features has same count.
df_train[cols].describe()
# Load X and y variables from pandas dataframe df_train
# write code here
# Get m = number of samples and n = number of features
# write code here
#Feature normalizing the columns (subtract mean, divide by standard deviation)
#Store the mean and std for later use
#Note don't modify the original X matrix, use a copy
stored_feature_means, stored_feature_stds = [], []
Xnorm = np.array(X_train).copy()
for icol in range(Xnorm.shape[1]):
stored_feature_means.append(np.mean(Xnorm[:,icol]))
stored_feature_stds.append(np.std(Xnorm[:,icol]))
#Skip the first column if 1's
# if not icol: continue
#Faster to not recompute the mean and std again, just used stored values
Xnorm[:,icol] = (Xnorm[:,icol] - stored_feature_means[-1])/stored_feature_stds[-1]
# check data after normalization
pd.DataFrame(data=Xnorm,columns=cols).describe()
# Run Linear Regression from scikit-learn or code given above.
# write code here. Repeat from above.
# To predict output using ['OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'YearBuilt'] as input features.
# Check features range and statistics to see if there is any missing data.
# As you can see from count "GarageCars" and "TotalBsmtSF" has 1 missing value each.
df_test[cols].describe()
# Replace missing value with the mean of the feature
df_test['GarageCars'] = df_test['GarageCars'].fillna((df_test['GarageCars'].mean()))
df_test['TotalBsmtSF'] = df_test['TotalBsmtSF'].fillna((df_test['TotalBsmtSF'].mean()))
df_test[cols].describe()
# read test X without 1's
# write code here
# predict using trained model
predict3 = # write code here
# replace any negative predicted saleprice by zero
predict3[predict3<0] = 0
# predict target/output variable for test data using the trained model and upload to kaggle.
# write code to save output as predict3.csv here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This shows how to base64 encode text. b64encode() expects bytes and returns bytes (b).
Step2: To get the string representation of bytes, use decode().
|
<ASSISTANT_TASK:>
Python Code:
import requests
import base64
r = requests.get("https://api.github.com/repos/gkthiruvathukal/st-hec/contents/hydra/dataserver.py")
print(r.status_code)
r.json().keys()
b64data = r.json().get('content')
lines = base64.b64decode(b64data).decode("utf-8").split('\n')
selected_lines = lines[10:30]
print("\n".join(selected_lines))
text = "How to encode some text"
bytes = text.encode("UTF-8")
encoded = base64.b64encode(bytes)
encoded
encoded.decode("UTF-8")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Optimizing our model using Bayesian optimization!
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Model
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
def preprocess_data(test_data=False):
def encode_one_categorical_feature(column):
le = LabelEncoder()
ohe = OneHotEncoder(sparse=False)
num_encoded = le.fit_transform(column.fillna('unk'))
oh_encoded = ohe.fit_transform(num_encoded.reshape(-1, 1))
return oh_encoded
data = pd.read_csv('data/train.csv')
target = ['SalePrice']
features = data.drop(['Id'] + target, axis=1).columns
dataset_types = pd.DataFrame(data[features].dtypes, columns=['datatype'])
dataset_types.reset_index(inplace=True)
numeric_features = dataset_types.rename(columns={"index" : "feature"}).feature[(dataset_types.datatype == 'float64') | (dataset_types.datatype == 'int64')]
num_data = data[numeric_features]
num_features = num_data.fillna(num_data.mean()).values
scaler = StandardScaler()
num_features_scaled = scaler.fit_transform(num_features)
categorical_features = dataset_types.rename(columns={"index" : "feature"}).feature[(dataset_types.datatype == 'object')]
cat_data = data[categorical_features]
cat_features = np.hstack([encode_one_categorical_feature(data[column]) for column in cat_data.columns])
print("Of the {} features in this dataset".format(len(data.columns)))
print("{} features are numeric".format(len(numeric_features)))
print("and {} features are categorical.".format(len(categorical_features)))
print("The last two are the target, which is numeric, and the id column.")
X = np.hstack((num_features_scaled, cat_features))
if test_data == True:
return X
y = data[target].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=606)
return X_train, X_test, y_train, y_test
def plot_history(history):
plt.plot(history.history['loss'], 'b')
plt.plot(history.history['val_loss'], 'r')
plt.title('model accuracy')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
def keras_model(X_train, X_test, y_train, y_test):
NUM_EPOCHS = 50
BATCH_SIZE = 128
inputs = Input(shape=(304, ))
x = Dropout(0.2)(inputs)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.2)(x)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.4)(x)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.4)(x)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.4)(x)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.4)(x)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.4)(x)
predictions = Dense(1)(x)
model = Model(inputs=[inputs], outputs=[predictions])
model.compile(loss="mse", optimizer="adam")
history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_split=0.2, verbose=0)
plot_history(history)
score = model.evaluate(X_test, y_test, verbose=0)
print("Test MSE is {:.2e}".format(score))
return history, model
X_train, X_test, y_train, y_test = preprocess_data()
history, model = keras_model(X_train, X_test, y_train, y_test)
predicted = model.predict(X_test)
plt.plot(y_test - predicted)
test_data = preprocess_data(test_data=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we will use the survey data from the NC State students and compare it to model output. Below is the information from the survey. You should not need to edit this code block.
Step2: We will create a grid for our model using Landlab's RasterModelGrid class, which we need to import. We make a grid with dx = dy = 1.0 m (same resolution as the survey data). We make a grid that has 5 columns and 90 rows, to match the length of the profile from the real landscape.
Step3: Now we import and initialize the LinearDiffuser component. In this case the units on our diffusivity coefficient, or transport coefficient, are m$^2$yr$^{-1}$.
Step4: Now we figure out the analytical solution for the elevation of the steady-state profile.
Step5: Now you can update the values of $D$ in Code Block 4 and rerun Code Block 5 to fit the analytical solution to the field data.
Step6: Now we plot the evolved cross-section.
|
<ASSISTANT_TASK:>
Python Code:
# Code Block 1
import numpy as np
from landlab.io import read_esri_ascii
from landlab.plot.imshow import imshow_grid
import matplotlib.pyplot as plt
#below is to make plots show up in the notebook
%matplotlib inline
# Code Block 2
# distance and elevation data along the survey line
field_dist = np.array([0,1,2,3,4,4.99,5.99,6.99,7.99,8.99,9.99,10.99,11.99,
12.99,13.99,14.98,15.98,16.98,17.98,18.98,19.98,
20.98,21.98,22.98,23.98,24.97,25.97,26.97,27.97,
28.97,29.97,30.97,31.97,32.97,33.97,34.96,35.96,
36.96,37.96,38.96,39.96,40.96,41.96,42.96,43.95,
44.95,45.95,46.95,49.95,50.95,51.95,52.95,53.94,
54.94,55.94,56.94,57.94,58.94,59.94,60.94,61.94,
62.94,63.93,64.93,65.93,66.93,67.93,68.93,69.93,
70.93,71.93,72.92,73.92,74.92,75.92,76.92,77.92,
78.92,79.92,80.92,81.92,82.91,83.91,84.91,85.91,
86.91,87.91,88.91])
field_z = np.array([0,0.03,0.3,0.47,0.62,0.83,1.09,1.31,1.54,1.8,2.14,2.38,
2.55,2.84,3.15,3.49,3.78,4.05,4.41,4.57,4.77,5.05,5.29,
5.46,5.68,5.96,6.4,6.81,6.99,7.21,7.45,7.63,7.79,7.87,
8.06,8.24,8.4,8.51,8.65,8.68,8.82,8.98,9.01,9.04,9.05,
9.09,9.07,9.07,9.02,8.93,8.9,8.83,8.73,8.62,8.47,8.28,
8.22,8,7.82,7.75,7.39,7.2,7.04,6.79,6.6,6.39,6.1,5.77,
5.5,5.3,5.11,4.89,4.64,4.5,4.32,4.1,3.96,3.6,3.19,2.92,
2.73,2.41,2.12,1.76,1.21,0.95,0.56,0.06])
# Code Block 3
from landlab import RasterModelGrid
mg = RasterModelGrid((90, 5), 1.0) # make grid
z_vals = mg.add_ones('topographic__elevation', at='node') # initialize z values
# Set initial conditions
initial_elevation = np.multiply(z_vals, -1.0) # this line and the next set the initial elevation to 0 m
z_vals += initial_elevation
# Set boundary conditions
mg.set_closed_boundaries_at_grid_edges(True, False, True, False)
# Initialize values for plotting variables down the middle of the hillslope
ycoord_rast = mg.node_vector_to_raster(mg.node_y)
ys_grid = ycoord_rast[:, 2]
# Code Block 4
from landlab.components import LinearDiffuser
D = 0.005 # value in m^2/yr
lin_diffuse = LinearDiffuser(mg, linear_diffusivity=D)
# Uniform rate of rock uplift, which drives uniform erosion at steady state
uplift_rate = 0.00001 # m/year, start with 1e-5 m/yr
# Total time in years that the model will run for.
runtime = 500000 # years
# Stability criteria for timestep dt. Coefficient can be changed
# depending on our tolerance for stability vs tolerance for run time.
# Do not change this.
dt = 0.5 * mg.dx * mg.dx / D
print("dt", dt)
# nt is number of time steps
nt = int(runtime // dt)
# Below is to keep track of time for labeling plots
time_counter = 0
# length of uplift over a single time step, meters
uplift_per_step = uplift_rate * dt
# Code Block 5
#ANALYTICAL SOLUTION
ys = np.arange(mg.number_of_node_rows*mg.dx-mg.dx)
# location of divide or ridge crest -> middle of grid
# based on boundary conds.
divide_loc = (mg.number_of_node_rows*mg.dx-mg.dx)/2
# half-width of the ridge
half_width = (mg.number_of_node_rows*mg.dx-mg.dx)/2
# analytical solution for elevation under linear diffusion at steady state
zs = (uplift_rate/(2*D)) * \
(np.power(half_width, 2) - np.power(ys - divide_loc, 2))
#PLOTTING
plt.figure()
imshow_grid(mg, 'topographic__elevation')
plt.title('initial topography, at right is the colorbar')
plt.figure()
elev_rast = mg.node_vector_to_raster(
mg.at_node['topographic__elevation'])
plt.figure()
plt.plot(ys_grid, elev_rast[:, 2], 'r-', label='model')
plt.plot(ys, zs, 'k--', label='analytical solution')
plt.plot(field_dist, field_z, 'b:', label='field data')
plt.xlabel('horizontal distance (m)')
plt.ylabel('elevation (m)')
plt.legend(loc='lower center')
#plt.title('before running model')
# Code Block 6
for i in range(nt):
mg['node']['topographic__elevation'][mg.core_nodes] += uplift_per_step
lin_diffuse.run_one_step(dt)
time_counter += dt
print('time evolved for ',time_counter,' years')
# Code Block 7
plt.figure()
elev_rast = mg.node_vector_to_raster(
mg.at_node['topographic__elevation'])
plt.plot(ys_grid, elev_rast[:, 2], 'r-', label='model')
plt.plot(ys, zs, 'k--', label='analytical solution')
plt.plot(field_dist, field_z, 'b:', label='field data')
plt.xlabel('horizontal distance (m)')
plt.ylabel('vertical elevation (m)')
plt.legend(loc='lower center')
plt.title('topographic cross section at time %s, with D = %s m^2/yr'%(time_counter,D))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set Configuration
Step2: 3. Enter Trends Places To Sheets Via Values Recipe Parameters
Step3: 4. Execute Trends Places To Sheets Via Values
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
FIELDS = {
'auth_write':'service', # Credentials used for writing data.
'secret':'',
'key':'',
'places_dataset':'',
'places_query':'',
'places_legacy':False,
'destination_sheet':'',
'destination_tab':'',
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'twitter':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'secret':{'field':{'name':'secret','kind':'string','order':1,'default':''}},
'key':{'field':{'name':'key','kind':'string','order':2,'default':''}},
'trends':{
'places':{
'single_cell':True,
'bigquery':{
'dataset':{'field':{'name':'places_dataset','kind':'string','order':3,'default':''}},
'query':{'field':{'name':'places_query','kind':'string','order':4,'default':''}},
'legacy':{'field':{'name':'places_legacy','kind':'boolean','order':5,'default':False}}
}
}
},
'out':{
'sheets':{
'sheet':{'field':{'name':'destination_sheet','kind':'string','order':6,'default':''}},
'tab':{'field':{'name':'destination_tab','kind':'string','order':7,'default':''}},
'range':'A1'
}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Building the synonym database
Step2: We go through the whole database (about 120,000 lines) and stop after 10,000 synonyms, because otherwise it takes far too long.
Step3: Display the first groups
Step4: Generating a database of modified sentences
Step5: Split the text into words.
Step6: Generate the modified sentences
Step7: Process all the words
Step8: Exercise
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from actuariat_python.data import wolf_xml
wolf_xml()
import os
if not os.path.exists("wolf-1.0b4.xml"):
raise FileNotFoundError("wolf-1.0b4.xml")
if os.stat("wolf-1.0b4.xml").st_size < 3000000:
raise FileNotFoundError("Size of 'wolf-1.0b4.xml' is very small: {0}".format(os.stat("wolf-1.0b4.xml").st_size))
from actuariat_python.data import enumerate_wolf_synonyms
for syn in enumerate_wolf_synonyms("wolf-1.0b4.xml", errors="ignore"):
print(syn)
break
allsyn = {}
for line, syn in enumerate(enumerate_wolf_synonyms("wolf-1.0b4.xml")):
if line % 10000 == 0: print("line", line, "allsyn", len(allsyn))
clean = [_.lower() for _ in syn if " " not in _]
if len(clean) > 1:
for word in clean:
if word not in allsyn:
allsyn[word] = set(clean)
continue
else:
for cl in clean:
allsyn[word].add(cl)
if len(allsyn) > 10000:
break
len(allsyn)
i = 0
for k, v in allsyn.items():
print(k,v)
i += 1
if i > 10:
break
import urllib.request
with urllib.request.urlopen("http://www.gutenberg.org/cache/epub/4647/pg4647.txt") as u:
content = u.read()
char = content.decode(encoding="utf-8")
import re
reg = re.compile("([- a-zA-Zâàäéèëêîïôöùûü']+)")
phrases = [_.lower() for _ in reg.findall(char)]
for i, phrase in enumerate(phrases):
if i >= 990:
print(phrase)
if i >= 1000:
break
import random
def modification(phrase, allsyn, nmax=10):
mots = phrase.split()
options = []
nb = 1
for mot in mots:
if mot in allsyn:
options.append(list(set([mot] + list(allsyn[mot]))))
else:
options.append([mot])
nb *= len(options[-1])
if nb == 1:
return [phrase]
else:
res = []
for i in range(0, min(nmax, nb//2+1, nb)):
sol = []
for mot in options:
h = random.randint(0, len(mot)-1)
sol.append(mot[h])
res.append(sol)
return res
modification("chatouiller le cérébral", allsyn)
len(phrases)
with open("zadig_augmente.txt", "w", encoding="utf-8") as f:
total = 0
init = 0
for i, phrase in enumerate(phrases):
augm = modification(phrase, allsyn)
init += 1
for au in augm:
f.write(" ".join(au) + "\n")
total += 1
"total", total, "initial", init
from pyensae.datasource import download_data
download_data("zadig_augmente.zip")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 2
Step2: Question 3
|
<ASSISTANT_TASK:>
Python Code:
def calculate_weight(feature):
weight = (1/(max(feature) - min(feature))) ** 2
return weight
price = calculate_weight(np.array([500000, 350000, 600000, 400000], dtype=float))
room = calculate_weight(np.array([3, 2, 4, 2], dtype=float))
lot = calculate_weight(np.array([1840, 1600, 2000, 1900], dtype=float))
print price
print room
print lot
import numpy as np
s1 = np.array([2, 1, 1, 1, 1, 1, 1, 1, 0, 0], dtype=float)
s2 = np.array([0, 2, 1, 1, 0, 0, 0, 1, 2, 1], dtype=float)
print s1
print s2
euclidean_distance = np.sqrt(np.sum((s1 - s2)**2))
euclidean_distance
import numpy as np
s1 = np.array([2, 1, 1, 1, 1, 1, 1, 1, 0, 0], dtype=float)
s2 = np.array([0, 2, 1, 1, 0, 0, 0, 1, 2, 1], dtype=float)
print s1
print s2
cosine_similarity = np.dot(s1, s2)/(np.sqrt(np.sum(s1**2)) * np.sqrt(np.sum(s2**2)))
cosine_distance = 1 - cosine_similarity
cosine_distance
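# Cross-check with SciPy's implementations (assuming scipy is installed):
# distance.euclidean matches euclidean_distance, distance.cosine matches cosine_distance.
from scipy.spatial import distance
print(distance.euclidean(s1, s2))
print(distance.cosine(s1, s2))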
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulate data
Step2: Now let's look at the distribution of our coverage counts
Step3: Combine everything into a single dataset.
Step5: QC Time
Step6: Interactive Plotting
Step7: If you have a lot of data, then interact can be slow because at each step along the slider it tries to calculate the filter. There is a noter interactive widget interact_manual that only runs calculations when you hit the run button.
Step8: Other types of interactivity
Step9: Now lets mess with a point and see if it changes.
|
<ASSISTANT_TASK:>
Python Code:
# Import Module
import numpy as np
import scipy as sp
import scipy.stats as stats
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
# Simulate $\theta$
sp.random.seed(42)
theta1 = sp.random.normal(loc=0.5, scale=0.1, size=1000)
theta2 = sp.random.normal(loc=0.2, scale=0.1, size=360)
# Simulate coverage
cvg1 = sp.random.poisson(20, size=1000)
cvg2 = sp.random.poisson(4, size=360)
## I can't have a coverage of 0, so replace 0's with 1
cvg1[cvg1 == 0] = 1
cvg2[cvg2 == 0] = 1
## Create joint of theta1 and theat2
theta = np.concatenate((theta1, theta2))
## Create joint of cvg1 and cvg2
cvg = np.concatenate((cvg1, cvg2))
# Density plot of $\theta$1 and $\theta$2
## Get x coordinates from 0 to 1
xs = np.linspace(0, 1, num=100)
## Get Density functions
density1 = stats.gaussian_kde(theta1)
density2 = stats.gaussian_kde(theta2)
density = stats.gaussian_kde(theta)
## Plot
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.plot(xs, density1(xs), label=r'$\theta$1')
ax1.plot(xs, density2(xs), label=r'$\theta$2')
ax1.set_title(r'Distribution of $\theta$1 and $\theta$2', fontsize=12)
ax1.legend()
ax2.plot(xs, density(xs), color='k', label=r'$\theta$1 + $\theta$2')
ax2.set_title(r'Joint Distribution of $\theta$1 and $\theta2$2', fontsize=12)
ax2.legend()
# Plot Distribution of Coverage
## Figure out the x limits
xs = np.linspace(0, cvg.max(), num=100)
## Get Density functions
density1 = stats.gaussian_kde(cvg1)
density2 = stats.gaussian_kde(cvg2)
## Plot
plt.plot(xs, density1(xs), label='High Coverage')
plt.plot(xs, density2(xs), label='Low Coverage')
plt.title('Distribution of Coverage')
plt.legend()
# Create Data Frame
dat = pd.DataFrame({'theta': theta, 'cvg': cvg})
dat.head(3)
# Plotting Desnsities is a lot easier with data frames
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
dat['theta'].plot(kind='kde', ax=ax1, title=r'Distribution of $\theta$')
dat['cvg'].plot(kind='kde', ax=ax2, title='Distribution of Coverage')
def pltLow(dat, cutoff):
    """Plot the theta density after filtering out rows with coverage below cutoff."""
clean = dat[dat['cvg'] >= cutoff]
clean['theta'].plot(kind='kde', title=r'Distribution of $\theta${}Coverage Count Cutoff $\geq$ {}'.format('\n',cutoff), xlim=(-0.2, 1.2))
# Test plot function
pltLow(dat, 1)
from IPython.html.widgets import interact, interact_manual, IntSlider, fixed
interact(pltLow, dat=fixed(dat), cutoff=IntSlider(min=0, max=20))
interact_manual(pltLow, dat=fixed(dat), cutoff=IntSlider(min=0, max=20))
# Import the mpld3 library
import mpld3
# Plain Scatter plot showing relationship between coverage and theta
dat.plot(kind='scatter', x='cvg', y='theta', figsize=(10, 10))
# Plot figure with mpld3
fig, ax = plt.subplots(figsize=(10, 10))
scatter = ax.scatter(dat['cvg'], dat['theta'])
labels = ['row {}'.format(i) for i in dat.index.tolist()]
tooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels)
mpld3.plugins.connect(fig, tooltip)
mpld3.display()
dat.ix[262, 'theta'] = -0.1
# Plot figure with mpld3
fig, ax = plt.subplots(figsize=(10, 10))
scatter = ax.scatter(dat['cvg'], dat['theta'])
labels = ['row {}'.format(i) for i in dat.index.tolist()]
tooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels)
mpld3.plugins.connect(fig, tooltip)
mpld3.display()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 Data preprocessing
Step2: 2.1.2 Interval (min-max) scaling
Step3: 2.1.3 The difference between standardization and normalization
Step4: 2.2 Binarizing quantitative features
Step5: 2.3 Dummy (one-hot) encoding of categorical features
Step6: 2.4 Missing value imputation
Step7: 2.5 Data transformation
Step8: Data transformations based on univariate functions can be handled in a unified way; the following code uses FunctionTransformer from the preprocessing module to apply a log transform to the data:
Step9: 2.6 Recap
Step10: 3.1.2 Correlation coefficient method
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
# Load the iris dataset
iris = load_iris()
# Feature matrix
iris.data
# Target vector
iris.target
from sklearn.preprocessing import StandardScaler
# Standardization; returns the standardized data
StandardScaler().fit_transform(iris.data)
from sklearn.preprocessing import MinMaxScaler
# Interval (min-max) scaling; returns data scaled to the [0, 1] range
MinMaxScaler().fit_transform(iris.data)
from sklearn.preprocessing import Normalizer
# Normalization; returns the normalized data
Normalizer().fit_transform(iris.data)
from sklearn.preprocessing import Binarizer
# Binarization with the threshold set to 3; returns the binarized data
Binarizer(threshold=3).fit_transform(iris.data)
from sklearn.preprocessing import OneHotEncoder
# Dummy (one-hot) encoding of the target values; returns the encoded data
OneHotEncoder().fit_transform(iris.target.reshape((-1,1)))
import numpy as np
from sklearn.preprocessing import Imputer
# Missing value imputation; returns the data with missing values filled in
# The missing_values parameter is the representation of missing values (default: NaN)
# The strategy parameter is the fill strategy (default: "mean")
Imputer().fit_transform(\
np.vstack((np.array([np.nan, np.nan, np.nan, np.nan]),iris.data)))
from sklearn.preprocessing import PolynomialFeatures
# Polynomial transformation
# The degree parameter is the polynomial degree (default: 2)
PolynomialFeatures().fit_transform(iris.data)
from sklearn.preprocessing import FunctionTransformer
# Custom data transformation using the log function
# The first argument is a univariate function
FunctionTransformer(np.log1p).fit_transform(iris.data)
from sklearn.feature_selection import VarianceThreshold
# Variance-threshold feature selection; returns the data with the selected features
# The threshold parameter is the variance threshold
VarianceThreshold(threshold=3).fit_transform(iris.data)
from sklearn.feature_selection import SelectKBest
from scipy.stats import pearsonr
# Select the K best features and return the data restricted to them
# The first argument is a scoring function that takes the feature matrix and the
# target vector and returns an array of (score, p-value) pairs, where the i-th
# entry is the score and p-value of the i-th feature; here it is defined via the
# Pearson correlation coefficient
# The parameter k is the number of features to select
SelectKBest(lambda X, Y: tuple(map(tuple,np.array(list(map(lambda x:pearsonr(x, Y), X.T))).T)), k=2).fit_transform(iris.data, iris.target)
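# A simpler, widely used alternative shown for comparison (an addition here, not
# part of the original text): chi-squared feature selection, which is applicable
# because the iris features are non-negative.
from sklearn.feature_selection import chi2
SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)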
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Follow this link for more info on the tagger
Step2:
Step3: Thankfully, nltk provides documentation for each tag, which can be queried using the tag, e.g., nltk.help.upenn_tagset('RB'), or a regular expression. nltk also provides a batch pos-tagging method for document pos-tagging
Step4: The list of all possible tags appears below
Step5: Nouns
Step6: Adjectives
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df0 = pd.read_csv("../data/interim/001_normalised_keyed_reviews.csv", sep="\t", low_memory=False)
df0.head()
# For monitoring duration of pandas processes
from tqdm import tqdm, tqdm_pandas
# To avoid RuntimeError: Set changed size during iteration
tqdm.monitor_interval = 0
# Register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
# (can use `tqdm_gui`, `tqdm_notebook`, optional kwargs, etc.)
tqdm.pandas(desc="Progress:")
# Now you can use `progress_apply` instead of `apply`
# and `progress_map` instead of `map`
# can also groupby:
# df.groupby(0).progress_apply(lambda x: x**2)
def convert_text_to_list(review):
return review.replace("[","").replace("]","").replace("'","").replace("\t","").split(",")
# Convert "reviewText" field to back to list
df0['reviewText'] = df0['reviewText'].astype(str)
df0['reviewText'] = df0['reviewText'].progress_apply(lambda text: convert_text_to_list(text));
df0['reviewText'].head()
df0['reviewText'][12]
import nltk
nltk.__version__
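# As noted in the text above, nltk can describe any Penn Treebank tag, e.g.
#   nltk.help.upenn_tagset('RB')
# prints the definition of the adverb tag (this requires the 'tagsets' nltk data
# package to be downloaded first).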
# Split negs
def split_neg(review):
new_review = []
for token in review:
if '_' in token:
split_words = token.split("_")
new_review.append(split_words[0])
new_review.append(split_words[1])
else:
new_review.append(token)
return new_review
df0["reviewText"] = df0["reviewText"].progress_apply(lambda review: split_neg(review))
df0["reviewText"].head()
### Remove Stop Words
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
def remove_stopwords(review):
return [token for token in review if not token in stop_words]
df0["reviewText"] = df0["reviewText"].progress_apply(lambda review: remove_stopwords(review))
df0["reviewText"].head()
# from nltk.tag import StanfordPOSTagger
# from nltk import word_tokenize
# # import os
# # os.getcwd()
# # Add the jar and model via their path (instead of setting environment variables):
# jar = '../models/stanford-postagger-full-2017-06-09/stanford-postagger.jar'
# model = '../models/stanford-postagger-full-2017-06-09/models/english-left3words-distsim.tagger'
# pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')
# def pos_tag(review):
# if(len(review)>0):
# return pos_tagger.tag(review)
## Example
# text = pos_tagger.tag(word_tokenize("What's the airspeed of an unladen swallow ?"))
# print(text)
# tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: pos_tag(review)))
# tagged_df.head()
# from textblob import TextBlob
# def blob_pos_tagger(review):
# blob = TextBlob(" ".join(review))
# return blob.tags
# blob_tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: blob_pos_tagger(review)))
# blob_tagged_df.head()
nltk_tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: nltk.pos_tag(review)))
nltk_tagged_df.head()
nltk_tagged_df['reviewText'][8]
## Join with Original Key and Persist Locally to avoid RE-processing
uniqueKey_series_df = df0[['uniqueKey']]
uniqueKey_series_df.head()
pos_tagged_keyed_reviews = pd.concat([uniqueKey_series_df, nltk_tagged_df], axis=1);
pos_tagged_keyed_reviews.head()
pos_tagged_keyed_reviews.to_csv("../data/interim/002_pos_tagged_keyed_reviews.csv", sep='\t', header=True, index=False);
# Save a dictionary into a pickle file.
pos_tagged_keyed_reviews.to_pickle("../data/interim/002_pos_tagged_keyed_reviews.p")
def noun_collector(word_tag_list):
if(len(word_tag_list)>0):
return [word for (word, tag) in word_tag_list if tag in {'NN', 'NNS', 'NNP', 'NNPS'}]
nouns_df = pd.DataFrame(nltk_tagged_df['reviewText'].progress_apply(lambda review: noun_collector(review)))
nouns_df.head()
keyed_nouns_df = pd.concat([uniqueKey_series_df, nouns_df], axis=1);
keyed_nouns_df.head()
keyed_nouns_df.to_csv("../data/interim/002_keyed_nouns.csv", sep='\t', header=True, index=False);
# Save a dictionary into a pickle file.
keyed_nouns_df.to_pickle("../data/interim/002_keyed_nouns.p")
def adjective_collector(word_tag_list):
if(len(word_tag_list)>0):
        return [word for (word, tag) in word_tag_list if tag in {'JJ', 'JJR', 'JJS'}]
adjectives_df = pd.DataFrame(nltk_tagged_df['reviewText'].progress_apply(lambda review: adjective_collector(review)))
adjectives_df.head()
keyed_adjectives_df = pd.concat([uniqueKey_series_df, adjectives_df], axis=1);
keyed_adjectives_df.head()
keyed_adjectives_df.to_csv("../data/interim/002_keyed_adjectives.csv", sep='\t', header=True, index=False);
# Save a dictionary into a pickle file.
keyed_adjectives_df.to_pickle("../data/interim/002_keyed_adjectives.p")
# END OF FILE
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test data
Step2: Causal Discovery
Step3: Total Effect
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import graphviz
import lingam
from lingam.utils import make_dot
print([np.__version__, pd.__version__, graphviz.__version__, lingam.__version__])
np.set_printoptions(precision=3, suppress=True)
np.random.seed(0)
x3 = np.random.uniform(size=10000)
x0 = 3.0*x3 + np.random.uniform(size=10000)
x2 = 6.0*x3 + np.random.uniform(size=10000)
x1 = 3.0*x0 + 2.0*x2 + np.random.uniform(size=10000)
x5 = 4.0*x0 + np.random.uniform(size=10000)
x4 = 8.0*x0 - 1.0*x2 + np.random.uniform(size=10000)
X = pd.DataFrame(np.array([x0, x1, x2, x3, x4, x5]).T ,columns=['x0', 'x1', 'x2', 'x3', 'x4', 'x5'])
X.head()
m = np.array([[0.0, 0.0, 0.0, 3.0, 0.0, 0.0],
[3.0, 0.0, 2.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 6.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[8.0, 0.0,-1.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
make_dot(m)
model = lingam.DirectLiNGAM()
model.fit(X)
make_dot(model.adjacency_matrix_)
# x3 --> x1
te = model.estimate_total_effect(X, 3, 1)
print(f'total effect: {te:.3f}')
# x2 --> x1
te = model.estimate_total_effect(X, 2, 1)
print('total effect: {:.3f}'.format(te))
# x1 --> x3 (Warning!)
# The causal order of the destination variable is earlier than the source variable.
te = model.estimate_total_effect(X, 1, 3)
print('total effect: {:.3f}'.format(te))
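# Sanity check (illustration only, using the true coefficients the data were
# generated with): the total effect of x3 on x1 goes through x3->x0->x1 and
# x3->x2->x1, i.e. 3.0*3.0 + 6.0*2.0 = 21.0, so the first estimate above
# should be close to this value.
true_te_x3_x1 = 3.0*3.0 + 6.0*2.0
print('true total effect x3 -> x1: {:.1f}'.format(true_te_x3_x1))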
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note the query object loves to drop values. To keep from making multiple queries, save the data somewhere.
Step3: and a df of the tickets
Step4: Let's sell a couple more of those tickets.
Step5: The purpose is to create a simulation. So I'm just hacking something out real quick to make our model look a little fuller.
Step6: You should be able to load the storage explorer at this point and look at the relationships.
Step7: Ok now that we know what tickets we want to sell, let's push them to the digital twin ecosystem. This is exactly the same as what we did with Customers in step one.
Step8: Now establishing a relationship between the lines and the areas. If a person leaves one area they have to get into the line of the next area in order to get there.
Step9: Now I'm going to add some people into different parts of the experience. This will change from user to user, but you can use this as an example of how this would work on your twin. Use the queries above to get the specific ids of your twins.
Step10: Sell a couple more tickets
|
<ASSISTANT_TASK:>
Python Code:
from azure.identity import AzureCliCredential
from azure.digitaltwins.core import DigitalTwinsClient
# using yaml instead of
import yaml
import uuid
# using altair instead of matplotlib for vizuals
import numpy as np
import pandas as pd
# you will get this from the ADT resource at portal.azure.com
your_digital_twin_url = "home-test-twin.api.wcus.digitaltwins.azure.net"
azure_cli = AzureCliCredential()
service_client = DigitalTwinsClient(
your_digital_twin_url, azure_cli)
service_client
query_expression = "SELECT * FROM digitaltwins t where IS_OF_MODEL('dtmi:billmanh:patron;1')"
query_result = service_client.query_twins(query_expression)
values = []
for i in query_result:
values.append(i)
df_customers = pd.DataFrame([[i['$dtId'],i['satisfaction']] for i in values],
columns=['id','satisfaction'])
df_customers
query_expression = "SELECT * FROM digitaltwins t where IS_OF_MODEL('dtmi:mymodels:ticket;1') and t.state='open'"
query_result = service_client.query_twins(query_expression)
available_tickets_df = pd.DataFrame([[i['$dtId'],i['event_title'],i['state']] for i in query_result],
columns = ['$dtId','event_title','state'])
available_tickets_df
tickets_sold = available_tickets_df.drop_duplicates(subset='event_title')['$dtId'].tolist()
tickets_sold
customers_sold = df_customers['id'].tolist()[5:7]
customers_sold
for c,t in enumerate(tickets_sold):
print(t)
tickethoder_relationship = {
"$relationshipId": f"{t}ownedBy{customers_sold[c]}",
"$sourceId": t,
"$relationshipName": "ownedBy",
"$targetId": customers_sold[c],
"bought_online": False
}
service_client.upsert_relationship(
tickethoder_relationship["$sourceId"],
tickethoder_relationship["$relationshipId"],
tickethoder_relationship
)
patch = [
{
"op": "replace",
"path": "",
"value": "sold"
}
]
service_client.update_component(t,"state", patch)
# # # Delete the model that you don't want.
# # service_client.delete_model(patron_model_id)
# # Create it if you just deleted it.
# # area_model_json = yaml.safe_load(open("models/area.json"))
# line_model_json = yaml.safe_load(open("models/Patron.json"))
# service_client.create_models([line_model_json])
for r in range(4):
digital_twin_id = f'area-{r}'
print(digital_twin_id)
dt_json = {
"$metadata": {
"$model": "dtmi:mymodels:area;1"
},
"capacity": 3,
"status": "open"
}
created_twin = service_client.upsert_digital_twin(digital_twin_id, dt_json)
print(created_twin)
for r in range(4):
digital_twin_id = f'line-{r}'
print(digital_twin_id)
dt_json = {
"$metadata": {
"$model": "dtmi:mymodels:line;1"
}
}
created_twin = service_client.upsert_digital_twin(digital_twin_id, dt_json)
print(created_twin)
def line_to_room(line,area,walking):
tickethoder_relationship = {
"$relationshipId": f"{line}leadsTo{area}",
"$sourceId": line,
"$relationshipName": "leadsTo",
"$targetId": area,
"walk_distance": walking
}
service_client.upsert_relationship(
tickethoder_relationship["$sourceId"],
tickethoder_relationship["$relationshipId"],
tickethoder_relationship
)
line_to_room("line-0","area-0",5)
line_to_room("line-1","area-1",5)
line_to_room("line-2","area-2",5)
line_to_room("line-3","area-3",5)
def cust_to_area(cust,area):
tickethoder_relationship = {
"$relationshipId": f"{cust}locatedIn{area}",
"$sourceId": cust,
"$relationshipName": "locatedIn",
"$targetId": area,
}
service_client.upsert_relationship(
tickethoder_relationship["$sourceId"],
tickethoder_relationship["$relationshipId"],
tickethoder_relationship
)
cust_to_area("customer-e6f49d8a-711b-41c3-9db8-c7ece3dbc32c","line-1")
cust_to_area("customer-21e17d28-76c3-4c04-8df9-396703692a68","line-1")
# cust_to_area("customer-25e19268-3433-4f09-afe3-94f466313368","line-0")
# cust_to_area("customer-c87adbfa-1c6e-4ea9-9f03-83e3877ef5fc","line-2")
def ticket_to_cust(ticket,cust):
tickethoder_relationship = {
"$relationshipId": f"{ticket}ownedBy{cust}",
"$sourceId": ticket,
"$relationshipName": "ownedBy",
"$targetId": cust,
}
service_client.upsert_relationship(
tickethoder_relationship["$sourceId"],
tickethoder_relationship["$relationshipId"],
tickethoder_relationship
)
ticket_to_cust("ticket-58bf00bf-d5b8-4d06-9002-f8bf1693cc56","customer-418cbc74-b101-4f50-8cf0-6075a2a8053c")
ticket_to_cust("ticket-67f3cbfb-4b35-4e10-bf6d-3fd216093c3c","customer-9c9b5c36-69f6-4f48-9362-4aaac4cb1be4")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring some features
Step2: Exploring the target column
Step3: 4. Now, let us explore the distribution of the column safe_loans.
Step4: Features for the classification algorithm
Step5: One-hot encoding
Step6: Build a decision tree classifier
Step7: Visualizing a learned model (Optional)
Step8: Making predictions
Step9: 12. Now, we will use our model to predict whether or not a loan is likely to default.
Step10: Quiz Question 1
Step11: Quiz Question 2
Step12: Quiz Question 3
Step13: 17. Now, evaluate the accuracy of the small_model and decision_tree_model on the entire validation_data, not just the subsample considered above.
Step14: Quiz Question 5
Step15: 19. Evaluate the accuracy of big_model on the training set and validation set.
Step16: Quiz Question 6
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import json

loans = pd.read_csv('lending-club-data.csv')
loans.head(2)
loans.columns
# safe_loans = 1 => safe
# safe_loans = -1 => risky
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
#loans = loans.remove_column('bad_loans')
loans = loans.drop('bad_loans', axis=1)
loans['safe_loans'].value_counts()
print len(loans[loans['safe_loans']==+1]) *1.0/ len(loans['safe_loans'])
print len(loans[loans['safe_loans']==-1]) *1.0/ len(loans['safe_loans'])
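# Equivalent, more idiomatic way to inspect the class balance (illustration only):
print(loans['safe_loans'].value_counts(normalize=True))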
features = ['grade', # grade of the loan
'sub_grade', # sub-grade of the loan
'short_emp', # one year or less of employment
'emp_length_num', # number of years of employment
'home_ownership', # home_ownership status: own, mortgage or rent
'dti', # debt to income ratio
'purpose', # the purpose of the loan
'term', # the term of the loan
'last_delinq_none', # has borrower had a delinquincy
'last_major_derog_none', # has borrower had 90 day or worse rating
'revol_util', # percent of available credit being used
'total_rec_late_fee', # total late fees received to day
]
target = 'safe_loans' # prediction target (y) (+1 means safe, -1 is risky)
# Extract the feature columns and target column
loans = loans[features + [target]]
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
if feat_type == object:
categorical_variables.append(feat_name)
for feature in categorical_variables:
loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
#print loans_one_hot_encoded
loans = loans.drop(feature, axis=1)
for col in loans_one_hot_encoded.columns:
loans[col] = loans_one_hot_encoded[col]
print loans.head(2)
print loans.columns
#pd.get_dummies(loans, prefix=['col1', 'col2'])
with open('module-5-assignment-1-train-idx.json') as train_data_file:
train_idx = json.load(train_data_file)
with open('module-5-assignment-1-validation-idx.json') as validation_data_file:
validation_idx = json.load(validation_data_file)
print train_idx[:3]
print validation_idx[:3]
print len(train_idx)
print len(validation_idx)
train_data = loans.iloc[train_idx]
validation_data = loans.iloc[validation_idx]
print len(loans.dtypes )
train_Y = train_data['safe_loans'].as_matrix()
train_X = train_data.drop('safe_loans', axis=1).as_matrix()
print train_Y.shape
print train_X.shape
from sklearn.tree import DecisionTreeClassifier
decision_tree_model = DecisionTreeClassifier(max_depth=6)
decision_tree_model = decision_tree_model.fit(train_X, train_Y)
small_model = DecisionTreeClassifier(max_depth=2)
small_model = small_model.fit(train_X, train_Y)
import sklearn
print('The scikit-learn version is {}.'.format(sklearn.__version__))
from sklearn import tree
import graphviz
from os import system
dot_data = tree.export_graphviz(small_model, out_file='simple_tree.dot',
                                feature_names=train_data.drop('safe_loans', axis=1).columns,
                                class_names=['-1', '+1'],
filled=True, rounded=True,
special_characters=True)
system("dot -Tpng simple_tree.dot -o simple_tree.png")
from IPython.display import Image
Image(filename='simple_tree.png')
validation_safe_loans = validation_data[validation_data[target] == 1]
validation_risky_loans = validation_data[validation_data[target] == -1]
sample_validation_data_risky = validation_risky_loans[0:2]
sample_validation_data_safe = validation_safe_loans[0:2]
sample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky)
sample_validation_data
sample_validation_data_Y = sample_validation_data['safe_loans'].as_matrix()
sample_validation_data_X = sample_validation_data.drop('safe_loans', axis=1).as_matrix()
print sample_validation_data_Y.shape
print sample_validation_data_X.shape
decision_tree_model.predict(sample_validation_data_X)
decision_tree_model.predict_proba(sample_validation_data_X)
small_model.predict_proba(sample_validation_data_X)
small_model.score(train_X, train_Y)
decision_tree_model.score(train_X, train_Y)
validation_Y = validation_data['safe_loans'].as_matrix()
validation_X = validation_data.drop('safe_loans', axis=1).as_matrix()
print validation_Y.shape
print validation_X.shape
small_model.score(validation_X, validation_Y)
decision_tree_model.score(validation_X, validation_Y)
big_model = DecisionTreeClassifier(max_depth=10)
big_model = big_model.fit(train_X, train_Y)
big_model.score(train_X, train_Y)
big_model.score(validation_X, validation_Y)
predictions = decision_tree_model.predict(validation_X)
print type(predictions)
print type(validation_Y)
false_positives = ((predictions==1) * (validation_Y==-1)).sum()
print false_positives
false_negatives = ((predictions==-1) * (validation_Y==1)).sum()
print false_negatives
correct_predictions = (predictions==validation_Y).sum()
print correct_predictions
print 10000 * false_negatives + 20000 * false_positives
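# Optional cross-check (not part of the assignment): sklearn's confusion matrix
# should be consistent with the counts computed above.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(validation_Y, predictions, labels=[-1, 1]))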
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'name':['Jack Fine','Kim Q. Danger','Jane 114 514 Smith', 'Zhongli']})
def g(df):
df.loc[df['name'].str.split().str.len() >= 3, 'middle_name'] = df['name'].str.split().str[1:-1]
for i in range(len(df)):
if len(df.loc[i, 'name'].split()) >= 3:
l = df.loc[i, 'name'].split()[1:-1]
s = l[0]
for j in range(1,len(l)):
s += ' '+l[j]
df.loc[i, 'middle_name'] = s
df.loc[df['name'].str.split().str.len() >= 2, 'last_name'] = df['name'].str.split().str[-1]
df.loc[df['name'].str.split().str.len() >= 2, 'name'] = df['name'].str.split().str[0]
df.rename(columns={'name': 'first name'}, inplace=True)
return df
df = g(df.copy())
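# For illustration, printing the result should show the columns
# ['first name', 'middle_name', 'last_name']; e.g. 'Kim Q. Danger' becomes
# first name 'Kim', middle_name 'Q.', last_name 'Danger', while 'Zhongli'
# keeps only a first name.
print(df)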
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load up our trajectory. This is the trajectory that we generated in the "Running a simulation in OpenMM and analyzing the results with mdtraj" example. The first step is to build the rmsd cache, which precalculates some values for the RMSD computation.
Step2: Lets compute all pairwise rmsds between conformations.
Step3: scipy.cluster implements the ward linkage algorithm (among others)
Step4: Lets plot the resulting dendrogram.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
%matplotlib inline
import mdtraj as md
import numpy as np
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy
traj = md.load('ala2.h5')
distances = np.empty((traj.n_frames, traj.n_frames))
for i in range(traj.n_frames):
distances[i] = md.rmsd(traj, traj, i)
print('Max pairwise rmsd: %f nm' % np.max(distances))
linkage = scipy.cluster.hierarchy.ward(distances)
plt.title('RMSD Ward hierarchical clustering')
scipy.cluster.hierarchy.dendrogram(linkage, no_labels=True, count_sort='descendent')
None
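# A possible follow-up (illustration; the distance threshold is arbitrary):
# cut the dendrogram into flat clusters with scipy's fcluster.
flat_labels = scipy.cluster.hierarchy.fcluster(linkage, t=0.25, criterion='distance')
print('Number of flat clusters: %d' % len(np.unique(flat_labels)))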
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, shape = (None, 28, 28, 1), name = 'inputs')
targets_ = tf.placeholder(tf.float32, shape = (None, 28, 28, 1), name = 'targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2, 2), (2, 2), padding = 'same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2, 2), (2, 2), padding = 'same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2, 2), (2, 2), padding = 'same')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7, 7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(conv6, 1, (3, 3), padding = 'same', activation = None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name = 'decoded')
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels = targets_, logits = logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
if ii % 100 == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2, 2), (2, 2), padding = 'same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2, 2), (2, 2), padding = 'same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2, 2), (2, 2), padding = 'same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7, 7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3, 3), padding = 'same', activation = tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(conv6, 1, (3, 3), padding = 'same', activation = None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name = 'decoded')
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels = targets_, logits = logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 10
batch_size = 200
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
if ii % 100 == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Define some convenient functions that we will use many times later.
Step8: Interactive 1D mixture model problem
Step10: Goodness-of-fit test
Step11: Generate data from $q$
Step12: Plot the data and model $p$.
Step13: All the implemented tests take the data in the form of a data.Data object. This is just an encapsulation of the sample X. To construct data.Data we do the following
Step14: Optimization of test locations
Step15: Let us optimize the parameters of the test on tr. The optimization relies on autograd to compute the gradient. We will use a Gaussian kernel for the test.
Step16: The optimization procedure returns back
Step17: Goodness-of-fit test
Step18: Recall that the null hypothesis was $H_0
Step21: Exercise
Step22: Create the Gaussian variance difference problem
Step23: Optimize the parameters
Step24: Goodness-of-fit test
Step25: A complicated 2D mixture model
Step28: Complicated model
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import kgof
import kgof.data as data
import kgof.density as density
import kgof.goftest as gof
import kgof.kernel as kernel
import kgof.plot as plot
import kgof.util as util
import matplotlib
import matplotlib.pyplot as plt
import autograd.numpy as np
# import scipy.stats as stats
import kgof.plot
kgof.plot.set_default_matplotlib_options()
%%html
<style>
/* highlight */
.hl {
background-color: #eeee00;
padding: 1px 5px 1px 5px;
margin: 4px;
}
</style>
def prob2d_pqgauss(qmean=np.array([0, 0]), QVar=np.eye(2), seed=2):
    """Construct a problem where p = N(0, I), q = N(qmean, QVar).
    Return p, and a DataSource (for q).
    """
p = density.Normal(np.array([0, 0]), np.eye(2))
ds = data.DSNormal(qmean, QVar)
return p, ds
def plot2d_pq(p, X, n_max=300, n_xdengrid=50, n_ydengrid=50,
xlim=None, ylim=None, margin=0, V=None, V0=None,
figw=8, figh=5):
    """A function to plot the model p, and the sample from q, along with other
    information such as the learned test locations V.
    p: UnnormalizedDensity (model)
    X: n x 2 data matrix
    n_max: do not plot more than this number of data points
    n_xdengrid, n_ydengrid: number of points to use to plot the density contour
    V: J x 2 matrix of J features (test locations) to show
    V0: J x 2 matrix of J features. Initial values before optimization.
    """
plt.figure(figsize=(figw, figh))
# plot the data
# plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.7, color='#8282FF', markersize=8);
n = X.shape[0]
n_sub = min(n, n_max)
plt.plot(X[:n_sub, 0], X[:n_sub, 1], 'bo', alpha=0.8, markeredgecolor='white', markersize=6);
xmin, ymin = np.min(X, 0)
xmax, ymax = np.max(X, 0)
if xlim is None:
xlim = [xmin-margin, xmax+margin]
if ylim is None:
ylim = [ymin-margin, ymax+margin]
try:
flogden = lambda Xt: p.log_normalized_den(Xt)
# call it to see if it is implemented.
flogden(np.array([[1,2]]))
except NotImplementedError as e:
#print 'Use log of the unnormalized density.'
flogden = lambda Xt: p.log_den(Xt)
# get a mesh to plot the contour of the density p
XX, YY, Logden = plot.box_meshgrid(flogden, xlim, ylim, nx=n_xdengrid, ny=n_ydengrid)
# plot the unnormalized density
Den = np.exp(Logden)
# list_colors = plt.cm.datad['Reds']
# list_colors = list(list_colors)
# list_colors[-1] = (1, 1, 1)
# lscm = matplotlib.colors.LinearSegmentedColormap("my_Reds", list_colors)
plt.contourf(XX, YY, Den, 7,
cmap=plot.get_density_cmap(),
alpha = 0.7
);
if V is not None:
# feature
plt.plot(V[:, 0], V[:, 1], 'm*', markeredgecolor='white',
label='Optimized', markersize=34)
if V0 is not None:
# plot the initial test locations (before the optimization)
plt.plot(V0[:, 0], V0[:, 1], 'k*', markeredgecolor='white',
label='Initial', markersize=34)
plt.xlim(xlim)
plt.ylim(ylim)
def func_fssd_power_criterion(p, X, k, V):
    """Return the value of the power criterion of FSSD.
    p: model density
    X: n x d data matrix
    k: a Kernel
    V: J x d numpy array. J test locations
    """
dat = data.Data(X)
return gof.FSSD.power_criterion(p, dat, k, V, reg=1e-5, use_unbiased=False)
def plot1d_pq(p, X, func_obj=None, rescale_obj=False, xlim=None, n_dom=200, n_bins=20,
margin=0, V=None, V0=None,
figw=8, figh=5):
    """A function to plot the model p, and the sample from q, along with other
    information such as the learned test locations V.
    Create a 1D plot.
    p: UnnormalizedDensity (model)
    func_obj: a function: m x 1 -> m-array for computing the optimization
        objective at m locations.
    rescale_obj: if true, rescale the objective function to fit the height
        of the histogram.
    n_dom: number of points to use to plot the density
    n_bins: number of bins to use for the histogram
    X: n x 1 data matrix
    V: J x 1 matrix of J features (test locations) to show
    V0: J x 1 matrix of J features. Initial values before optimization.
    """
n, d = X.shape
assert d==1
assert V is None or V.shape[1] == 1
assert V0 is None or V0.shape[1] == 1
plt.figure(figsize=(figw, figh))
xmin = np.min(X, 0)
xmax = np.max(X, 0)
if xlim is None:
xlim = [xmin-margin, xmax+margin]
# plot the data as a histogram
hist_result = plt.hist(X, normed=True, bins=n_bins, alpha=0.8, label='Data')
hist_heights = hist_result[0]
hist_parts = hist_result[1]
try:
is_normalized = True
flogden = lambda Xt: p.log_normalized_den(Xt)
# call it to see if it is implemented.
flogden(np.array([[1]]))
except NotImplementedError as e:
#print 'Use log of the unnormalized density.'
flogden = lambda Xt: p.log_den(Xt)
is_normalized = False
dom = np.linspace(xlim[0], xlim[1], 200)
den = np.exp(flogden(dom[:, np.newaxis]))
if not is_normalized:
# If p is an unnormalized density, then
# normalize it to fit the height of the histogram
# (this is not technically correct. This is just a
# simple way to plot.)
hist_height = np.max(hist_heights)
max_den = np.max(den)
# renormalize
den = den/max_den*hist_height
# plot the density (may be unnormalized)
plt.plot(dom, den, 'r-', label=r'$p$')
if func_obj is not None:
# plot the optimization objective
objs = func_obj(dom[:, np.newaxis])
if rescale_obj:
# rescale to match the height of the histogram
max_obj = np.max(objs)
objs = objs/max_obj*hist_height*0.8
plt.plot(dom, objs, 'k-', label=r'Score function')
if V is not None:
# feature
plt.plot(V[:, 0], V[:, 1], 'm*', markeredgecolor='white',
label='Optimized', markersize=34)
if V0 is not None:
# plot the initial test locations (before the optimization)
plt.plot(V0[:, 0], V0[:, 1], 'k*', markeredgecolor='white',
label='Initial', markersize=34)
plt.xlim(xlim)
def prob_1dmixture(pm=0, pv=1, qm1=0, qv1=1, qm2=1, qv2=1, seed=3):
    """A 1D problem where both p and q are 2-component Gaussian mixture models.
    p(x) = 0.5*N(0, 1) + 0.5*N(pm, pv)
    q(x) = 0.5*N(qm1, qv1) + 0.5*N(qm2, qv2)
    Return p and q (both are UnnormalizedDensity).
    """
assert pv > 0
assert qv1 > 0
assert qv2 > 0
p = density.IsoGaussianMixture(means=np.array([[0], [pm]]),
variances=np.array([1, pv]))
q = density.IsoGaussianMixture(means=np.array([[qm1], [qm2]]),
variances=np.array([qv1, qv2]))
return p, q
def func_interactive_1dmixture(pm=0, pv=1, qm1=0, qv1=1, qm2=1, qv2=1):
seed = 84
p, q = prob_1dmixture(pm=pm, pv=pv, qm1=qm1, qv1=qv1, qm2=qm2,
qv2=qv2, seed=seed)
# n = sample size to draw from q
n = 600
gwidth2 = 1.5**2
# generate data from q
ds = q.get_datasource()
dat = ds.sample(n, seed=seed+3)
Xs = dat.data()
# kernel
k = kernel.KGauss(sigma2=gwidth2)
def score_function(Vs):
        """Evaluate the score at m test locations.
        Vs: m x d test locations.
        """
m = Vs.shape[0]
objs = np.zeros(m)
for i in range(m):
v = Vs[i, :]
obj = func_fssd_power_criterion(p, Xs, k, v[np.newaxis, :])
objs[i] = obj
return objs
# plot the problem
plot1d_pq(p, Xs, func_obj=score_function, rescale_obj=False,
margin=3, n_dom=100, n_bins=12, figw=8, figh=5)
print('p = 0.5*N(0, 1) + 0.5*N({}, {})'.format(pm, pv))
print('q = 0.5*N({}, {}) + 0.5*N({}, {})'.format(qm1, qv1, qm2, qv2))
plt.legend(loc='upper right', fontsize=20)
# from __future__ import print_function
from ipywidgets import interact, interactive, fixed
from IPython.display import display
import ipywidgets as widgets
vs = interactive(func_interactive_1dmixture, pm=(0, 10, 1), pv=(1e-3, 5, 1),
qm1=(-5, 10, 1), qv1=(1e-3, 5, 1), qm2=(-5, 10, 1),
qv2=(1e-3, 5, 1)
)
display(vs)
# Assume two dimensions.
d = 2
def isogauss_log_den(X):
    """Evaluate the log density of the standard isotropic Gaussian
    at the points (rows) in X.
    Note that the density is NOT normalized.
    X: n x d nd-array
    Return a length-n array.
    """
# d = dimension of the input space
unden = -np.sum(X**2, 1)/2.0
return unden
# p is an UnnormalizedDensity object
p = density.from_log_den(d, isogauss_log_den)
# Let's assume that m = 1.
m = 1
# Draw n points from q
seed = 5
np.random.seed(seed)
n = 300
X = np.random.randn(n, 2) + np.array([m, 0])
plot2d_pq(p, X)
plt.axis('equal');
plt.grid()
# dat will be fed to the test.
dat = data.Data(X)
# We will use some portion of the data for parameter tuning, and the rest for testing.
tr, te = dat.split_tr_te(tr_proportion=0.5, seed=2)
# J is the number of test locations (or features). Typically not larger than 10.
J = 1
# There are many options for the optimization.
# Almost all of them have default values.
# Here, we will list a few to give you a sense of what you can control.
# Full options can be found in gof.GaussFSSD.optimize_locs_widths(..)
opts = {
'reg': 1e-2, # regularization parameter in the optimization objective
'max_iter': 50, # maximum number of gradient ascent iterations
'tol_fun':1e-4, # termination tolerance of the objective
}
# make sure to give tr (NOT te).
# do the optimization with the options in opts.
V_opt, gw_opt, opt_info = gof.GaussFSSD.optimize_auto_init(p, tr, J, **opts)
opt_info
# alpha = significance level of the test
alpha = 0.01
fssd_opt = gof.GaussFSSD(p, gw_opt, V_opt, alpha)
# return a dictionary of testing results
test_result = fssd_opt.perform_test(te)
test_result
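# The dictionary above holds the test statistic, the p-value and a rejection
# decision; a small p-value means H0: X ~ p is rejected at level alpha. (The
# exact key names, e.g. 'h0_rejected', follow the kgof package's convention.)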
Xtr = tr.data()
plot2d_pq(p, Xtr, V=V_opt, margin=0.5,
# , xlim=[-4, 4]
)
plt.axis('equal');
# plt.legend()
def init_params(Xtr, J, seed=40):
    """Return V0, gwidth0 where V0 is a J x d numpy array containing
    J initial test locations before optimization. These are initialized
    by drawing from a Gaussian fitted to the training set.
    gwidth0 is the initial Gaussian width initialized by
    the median heuristic.
    Xtr: n x d numpy array of n data points
    J: number of test locations required
    """
# random test locations
V0 = util.fit_gaussian_draw(Xtr, J, seed=seed+11)
gwidth0 = util.meddistance(Xtr, subsample=1000)**2
return V0, gwidth0
def optimize_params(p, tr, V0, gwidth0):
    """Optimize the test locations and the Gaussian width on the training data.
    p: the model. UnnormalizedDensity.
    tr: data.Data object representing the training set
    V0: J x d numpy array of J initial test locations
    gwidth0: initial Gaussian width
    Return V_opt, gw_opt, opt_info.
    """
# There are many options for the optimization.
# Almost all of them have default values.
# Here, we will list a few to give you a sense of what you can control.
# Full options can be found in gof.GaussFSSD.optimize_locs_widths(..)
opts = {
'reg': 1e-2, # regularization parameter in the optimization objective
'max_iter': 50, # maximum number of gradient ascent iterations
'tol_fun':1e-5, # termination tolerance of the objective
'gwidth_lb': 1**2, #absolute lower bound on the Gaussian width^2
'gwidth_ub': 10**2,
}
# make sure to give tr (NOT te).
# do the optimization with the options in opts.
# V_opt, gw_opt, opt_info = gof.GaussFSSD.optimize_auto_init(p, tr, J, **opts)
V_opt, gw_opt, opt_info = gof.GaussFSSD.optimize_locs_widths(p, tr, gwidth0, V0, **opts)
return V_opt, gw_opt, opt_info
# p is an UnnormalizedDensity object
seed = 30
n = 1500
p, ds = prob2d_pqgauss(qmean=np.array([0, 0]), QVar=np.diag([2, 0.5]), seed=seed)
dat = ds.sample(n, seed=seed+2)
X = dat.data()
# plot
plot2d_pq(p, X)
plt.axis('equal');
# We will use a portion (tr_proportion) of the data for parameter tuning
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed+8)
Xtr = tr.data()
# J is the number of test locations (or features). Typically not larger than 10.
J = 2
V0, gwidth0 = init_params(Xtr, J, seed=seed+3)
V_opt, gw_opt, opt_info = optimize_params(p, tr, V0, gwidth0)
opt_info
# alpha = significance level of the test
alpha = 0.01
fssd_opt = gof.GaussFSSD(p, gw_opt, V_opt, alpha)
# Goodness-of-fit test
# return a dictionary of testing results
test_result = fssd_opt.perform_test(te)
test_result
plot2d_pq(p, Xtr, V0=V0, V=V_opt, margin=0,
# , xlim=[-4, 4]
)
plt.axis('equal');
plt.legend(fontsize=20)
# k x d where k = number of mixture components
pmeans = np.array([[0, 0], [3, 3], [3, 0]])
pvariances = np.ones(3)*1
p = density.IsoGaussianMixture(pmeans, pvariances)
# q is a Gaussian mixture
qmeans = np.array([[0, 0], [3, 3], [0, 3]])
qvariances = pvariances
q = density.IsoGaussianMixture(qmeans, qvariances)
ds = q.get_datasource()
# generate some data from q
n = 800
dat = ds.sample(n, seed=seed+23)
X = dat.data()
plot2d_pq(p, X, margin=1)
# We will use a portion (tr_proportion) of the data for parameter tuning
tr, te = dat.split_tr_te(tr_proportion=0.5, seed=seed+8)
Xtr = tr.data()
# J is the number of test locations (or features). Typically not larger than 10.
J = 2
V0, gwidth0 = init_params(Xtr, J, seed=seed+3)
V_opt, gw_opt, opt_info = optimize_params(p, tr, V0, gwidth0)
opt_info
# alpha = significance level of the test
alpha = 0.01
fssd_opt = gof.GaussFSSD(p, gw_opt, V_opt, alpha)
# Goodness-of-fit test
# return a dictionary of testing results
test_result = fssd_opt.perform_test(te)
test_result
plot2d_pq(p, Xtr, V0=V0, V=V_opt, margin=0,
# , xlim=[-4, 4]
)
plt.axis('equal');
plt.legend(fontsize=20)
V_opt
gw_opt
def prob_rbm_perturb(B_scale=1, b_scale=1, c_scale=1,
B_perturb=0, b_perturb=0, c_perturb=0,
d=1, dh=8, seed=38):
    """B_scale, b_scale, c_scale: control scaling of the parameters of the RBMs
    B_perturb, b_perturb, c_perturb: how much q differs from p
    d: dimension of the input
    dh: dimension of the hidden variables
    Return p and q (both are UnnormalizedDensity).
    """
assert B_scale >= 0
assert b_scale >= 0
assert c_scale >= 0
assert B_perturb >= 0
assert b_perturb >= 0
assert c_perturb >= 0
with util.NumpySeedContext(seed=seed):
# B = np.array([-1, 1])[np.random.randint(0, 2, (d, dh))]
B = np.random.randn(d, dh)*B_scale
b = np.random.rand(d)*b_scale - b_scale/2.0
c = np.random.rand(dh)*c_scale - c_scale/2.0
# the model p
p = density.GaussBernRBM(B, b, c)
# perturb parameters of p to construct q
Bq = B + np.random.randn(d, dh)*B_perturb
bq = b + np.random.randn(d)*b_perturb
cq = c + np.random.randn(dh)*c_perturb
# construct the density q
q = density.GaussBernRBM(Bq, bq, cq)
return p, q
def func_interactive_rbm_problem(B_scale=1, b_scale=1, c_scale=1,
B_perturb=0, b_perturb=0, c_perturb=0):
dh = 7
d = 1
seed = 84
# n = sample size to draw from q
n = 500
gwidth2 = 3**2
p, q = prob_rbm_perturb(B_scale=B_scale, b_scale=b_scale, c_scale=c_scale,
B_perturb=B_perturb, b_perturb=b_perturb, c_perturb=c_perturb,
d=d, dh=dh, seed=seed)
# generate data from q
ds = q.get_datasource()
# Sampling from the RBM relies on Gibbs. Just to make it cheaper,
# let us set the burnin iterations to be reasonably small.
ds.burnin = 1000
dat = ds.sample(n, seed=seed+37)
Xs = dat.data()
# kernel
k = kernel.KGauss(sigma2=gwidth2)
def score_function(Vs):
        """Evaluate the score at m test locations.
        Vs: m x d test locations.
        """
m = Vs.shape[0]
objs = np.zeros(m)
for i in range(m):
v = Vs[i, :]
obj = func_fssd_power_criterion(p, Xs, k, v[np.newaxis, :])
objs[i] = obj
return objs
# plot the problem
plot1d_pq(p, Xs, func_obj=score_function, rescale_obj=False,
margin=1, n_dom=100, n_bins=12, figw=10, figh=6)
plt.legend()
vs = interactive(func_interactive_rbm_problem, B_scale=(0, 2, 0.1),
b_scale=(0, 3, 0.2), c_scale=(0, 3, 0.2),
B_perturb=(0, 1, 0.05),
b_perturb=fixed(0), c_perturb=fixed(0)
)
display(vs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Set neural network hyperparameters (tidier at top of file!)
Step3: Set number of neurons for each layer
Step4: Define placeholders Tensors for inputs and labels
Step5: Define types of layers
Step6: Define dictionaries for storing weights and biases for each layer -- and initialize
Step7: Design neural network architecture
Step8: Build model
Step9: Define model's loss and its optimizer
Step10: Define evaluation metrics
Step11: Create op for variable initialization
Step12: Train the network in a session
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
lr = 0.1
epochs = 10
batch_size = 128
weight_initializer = tf.contrib.layers.xavier_initializer()
n_input = 784
n_dense_1 = 64
n_dense_2 = 64
n_classes = 10
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# dense layer with ReLU activation:
def dense(x, W, b):
z = tf.add(tf.matmul(x, W), b)
a = tf.nn.relu(z)
return a
bias_dict = {
'b1': tf.Variable(tf.zeros([n_dense_1])),
'b2': tf.Variable(tf.zeros([n_dense_2])),
'b_out': tf.Variable(tf.zeros([n_classes]))
}
weight_dict = {
'W1': tf.get_variable('W1', [n_input, n_dense_1], initializer=weight_initializer),
'W2': tf.get_variable('W2', [n_dense_1, n_dense_2], initializer=weight_initializer),
'W_out': tf.get_variable('W_out', [n_dense_2, n_classes], initializer=weight_initializer),
}
def network(x, weights, biases):
# two dense hidden layers:
dense_1 = dense(x, weights['W1'], biases['b1'])
dense_2 = dense(dense_1, weights['W2'], biases['b2'])
# linear output layer (softmax):
out_layer_z = tf.add(tf.matmul(dense_2, weights['W_out']), biases['b_out'])
return out_layer_z
predictions = network(x, weights=weight_dict, biases=bias_dict)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(cost)
# calculate accuracy by identifying test cases where the model's highest-probability class matches the true y label:
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
accuracy_pct = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) * 100
initializer_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(initializer_op)
print("Training for", epochs, "epochs.")
# loop over epochs:
for epoch in range(epochs):
avg_cost = 0.0 # track cost to monitor performance during training
avg_accuracy_pct = 0.0
# loop over all batches of the epoch:
n_batches = int(mnist.train.num_examples / batch_size)
for i in range(n_batches):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# feed batch data to run optimization and fetching cost and accuracy:
_, batch_cost, batch_acc = session.run([optimizer, cost, accuracy_pct], feed_dict={x: batch_x, y: batch_y})
# accumulate mean loss and accuracy over epoch:
avg_cost += batch_cost / n_batches
avg_accuracy_pct += batch_acc / n_batches
# output logs at end of each epoch of training:
print("Epoch ", '%03d' % (epoch+1),
": cost = ", '{:.3f}'.format(avg_cost),
", accuracy = ", '{:.2f}'.format(avg_accuracy_pct), "%",
sep='')
print("Training Complete. Testing Model.\n")
test_cost = cost.eval({x: mnist.test.images, y: mnist.test.labels})
test_accuracy_pct = accuracy_pct.eval({x: mnist.test.images, y: mnist.test.labels})
print("Test Cost:", '{:.3f}'.format(test_cost))
print("Test Accuracy: ", '{:.2f}'.format(test_accuracy_pct), "%", sep='')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot Trajectories from User Profile Eval Dataset
Step2: Save
|
<ASSISTANT_TASK:>
Python Code:
# our lib
from lib.resnet50 import ResNet50
from lib.imagenet_utils import preprocess_input, decode_predictions
#keras
from keras.preprocessing import image
from keras.models import Model
import glob
import os
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def preprocess_img(img_path):
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return(x,img)
# instantiate the model
base_model = ResNet50(include_top=False, weights='imagenet') #this will pull the weights from the folder
# cut the model to lower levels only
model = Model(input=base_model.input, output=base_model.get_layer('avg_pool').output)
user_id = 106144465
#get images
folder = '../data_img_sample_item_view_sequences/'
img_files = glob.glob(folder+'*'+str(user_id)+'*')
print(img_files)
# make features
trajectory_features = np.empty((len(img_files),2048))
for i,img_file in enumerate(img_files):
x,img = preprocess_img(img_file) # preprocess
trajectory_features[i,:] = model.predict(x)[0,0,0,:]
# load the red/pink classifier weights (saved LR class weights; also reloaded below)
red_weights = np.loadtxt('../data_nn_features/class_weights_LR_redpink.txt')
red_traj = np.dot(trajectory_features, red_weights)
print('target class')
plt.figure(figsize=(12,6))
len_seq = len(img_files)
fig,axes = plt.subplots(2,len_seq)
# make color
color_red_black = pd.Series(red_traj>0).map({False:'k',True:'r'}).as_matrix()
for i in range(len_seq):
img = image.load_img(img_files[i], target_size=(224, 224))
# images
axes[0,i].imshow(img)
axes[0,i].set_xticklabels([])
#axes[0,i].get_xaxis().set_visible(False)
axes[0,i].get_xaxis().set_ticks([])
axes[0,i].get_yaxis().set_visible(False)
if i<(len_seq-1):
axes[0,i].set_xlabel('view '+str(i))
else:
axes[0,i].set_xlabel('buy')
# bar
axes[1,i].bar(0,red_traj[i],color=color_red_black[i])
axes[1,i].set_ylim([-10,5])
axes[1,i].get_xaxis().set_visible(False)
axes[1,i].axhline(y=0,linestyle='--',color='w')
if i==0:
axes[1,i].set_ylabel('red classification')
else:
axes[1,i].get_yaxis().set_visible(False)
sns.despine()
savefile = '../figures/example_sequence_interpretable_features_ui_'+str(user_id)+'.png'
plt.savefig(savefile,dpi=300)
reload(src.s3_data_management)
from src import s3_data_management
s3_data_management.push_results_to_s3(os.path.basename(savefile),savefile)
# load weights from the nn
red_weights = np.loadtxt('../data_nn_features/class_weights_LR_redpink.txt')
# load smaller user behavior dataset
user_profile = pd.read_pickle('../data_user_view_buy/user_profile_items_nonnull_features_20_mins_5_views_v2_sample1000.pkl')
user_sample = user_profile.user_id.unique()
print(len(user_profile))
print(len(user_sample))
user_profile.head()
# read nn features
spu_fea = pd.read_pickle("../data_nn_features/spu_fea_sample1000.pkl")
spu_fea.head()
# sample users
size = 10
np.random.seed(1000)
user_ids = np.random.choice(user_profile.user_id.unique(),size=size)
fig,axes = plt.subplots(size,1,figsize=(16,3*size),sharex=True,sharey=True)
for ui,user_id in enumerate(user_ids):
# get his trajectory
trajectory = user_profile.loc[user_profile.user_id==user_id,]
# get trajectory features (make a separate function # )
trajectory_features = np.empty((len(trajectory),2048))
for i,(index,row) in enumerate(trajectory.iterrows()):
trajectory_features[i,:] = spu_fea.loc[spu_fea.spu_id==row['view_spu'],'features'].as_matrix()[0]
# project onto red dimension
red_traj = np.dot(trajectory_features,red_weights)
# plot
axes[ui].plot(np.arange(len(red_traj)),red_traj)
axes[ui].axhline(y=0,linestyle='--',color='k')
axes[ui].set_ylabel('red features')
sns.despine()
plt.xlabel('positition in sequence')
savefile = '../figures/example_sequences_red_10_users.png'
plt.savefig(savefile,dpi=300)
from sklearn.decomposition import PCA
trajectory.head()
#spu_fea.spu_id['spu_']
# read nn features
spu_fea = pd.read_pickle("../data_nn_features/spu_fea_sample1000.pkl")
# reduce dimensionality
pca = pickle.load(open('../data_nn_features/pca_all_items_sample1000.pkl','rb'))
plt.plot(red_traj,np.arange(len(red_traj)))
plt.xlabel('red features')
plt.ylabel('positition in sequence')
sns.despine()
len(spu_fea['features'].as_matrix()[0])
#spu_fea[row['view_spu']]
projection = pca.transform(X_item_feature[0,:].reshape(-1,1).T)
projection.shape
plt.plot(np.arange(100),projection[0,0:100])
plt.xlabel('component')
plt.ylabel('projection')
sns.despine()
# sample users
size = 10
np.random.seed(1000)
user_ids = np.random.choice(user_profile.user_id.unique(),size=size)
fig,axes = plt.subplots(size,1,figsize=(16,3*size),sharex=True,sharey=True)
for ui,user_id in enumerate(user_ids):
# get his trajectory
trajectory = user_profile.loc[user_profile.user_id==user_id,]
# get trajectory features (make a separate function # )
trajectory_features = np.empty((len(trajectory),2048))
for i,(index,row) in enumerate(trajectory.iterrows()):
trajectory_features[i,:] = spu_fea.loc[spu_fea.spu_id==row['view_spu'],'features'].as_matrix()[0]
# project onto pca dimension
projected_traj = pca.transform(trajectory_features)
# get first dimension
traj_PC1 = projected_traj[:,0]
traj_PC2 = projected_traj[:,1]
traj_PC3 = projected_traj[:,2]
# plot
axes[ui].plot(traj_PC1,label='PC1')
axes[ui].plot(traj_PC2,label='PC2')
axes[ui].plot(traj_PC3,label='PC3')
plt.legend()
axes[ui].axhline(y=0,linestyle='--',color='k')
axes[ui].set_ylabel('red features')
sns.despine()
plt.xlabel('positition in sequence')
savefile = '../figures/example_sequences_PCA_10_users.png'
plt.savefig(savefile,dpi=300)
%%bash
#jupyter nbconvert --to Plotting_Sequences_in_low_dimensions.ipynb && mv Plotting_Sequences_in_low_dimensions.slides.html ../notebook_slides/Plotting_Sequences_in_low_dimensions_v1.slides.html
jupyter nbconvert --to html Plotting_Sequences_in_low_dimensions.ipynb && mv Plotting_Sequences_in_low_dimensions.html ../notebook_htmls/Plotting_Sequences_in_low_dimensions_v1.html
cp Plotting_Sequences_in_low_dimensions.ipynb ../notebook_versions/Plotting_Sequences_in_low_dimensions_v1.ipynb
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear Regression
Step2: Train with separate month using all input
Step3: Train with separate month using outdoor temperature only
Step4: Train with seasonal data using outdoor temperature only
Step5: Regression Tree
Step6: Train with all data using outdoor temperature only
Step7: Train with separate month using all data
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#outdoor air temperature
oa = pd.read_csv("../data/oa_temp_utc_f.csv");
oa.columns = ['time', 'oa']
oa.set_index("time", drop = True, inplace = True);
oa.index = pd.to_datetime(oa.index)
oa = oa.replace('/', np.nan)
oa['oa'] = oa['oa'].astype(float)
oa = oa.interpolate(method = "time")
#relative humidity
rh = pd.read_csv("../data/rh_utc_perct.csv");
rh.columns = ['time', 'rh']
rh.set_index("time", drop = True, inplace = True);
rh.index = pd.to_datetime(rh.index)
rh = rh.replace('/', np.nan)
rh['rh'] = rh['rh'].astype(float)
rh = rh.interpolate(method = "time")
#solar radiation
sr = pd.read_csv("../data/total_solar_utc_btuh-ft2.csv");
sr.columns = ['time', 'sr'];
sr.set_index('time', drop = True, inplace = True);
sr.index = pd.to_datetime(sr.index)
sr = sr.replace('/', np.nan)
sr['sr'] = sr['sr'].astype(float)
sr = sr.interpolate(method = "time")
#wind speed
ws = pd.read_csv("../data/wind_speed_utc_mph.csv");
ws.columns = ['time', 'ws'];
ws.set_index('time', drop = True, inplace = True);
ws.index = pd.to_datetime(ws.index)
ws = ws.replace('/', np.nan)
ws['ws'] = ws['ws'].astype(float)
ws = ws.interpolate(method = "time")
#damper position of room 107n
dp = pd.read_csv("../data/107n_damper_utc_0to7or10.csv");
dp.columns = ['time', 'dp'];
dp.set_index('time', drop = True, inplace = True);
dp.index = pd.to_datetime(dp.index)
dp = dp.replace('/', np.nan)
dp['dp'] = dp['dp'].astype(float)
dp = dp.interpolate(method = "time")
#some data has scale 0-10, some has scale 0-7, so transfer 0-7 to 0-10
dpUpper = dp.loc['2014-06-26 00:10:00':'2015-02-02 14:40:00',:];
dpLower = dp.loc['2015-02-02 14:50:00':,:]
dpLower = dpLower.multiply(10.0/7.0, axis='columns');
frames = [dpUpper, dpLower]
dp = pd.concat(frames)
#supply air temperature of room 107n
st = pd.read_csv("../data/107n_vavtemp_utc_f.csv");
st.columns = ['time', 'st'];
st.set_index('time', drop = True, inplace = True);
st.index = pd.to_datetime(st.index)
st = st.replace('/', np.nan)
st['st'] = st['st'].astype(float)
st = st.interpolate(method = "time");
#indoor air temperature of 107n
at = pd.read_csv("../data/107n_temp_utc_f.csv");
at.columns = ['time', 'at'];
at.set_index('time', drop = True, inplace = True);
at.index = pd.to_datetime(at.index)
at = at.replace('/', np.nan)
at['at'] = at['at'].astype(float)
at = at.interpolate(method = "time");
#merge together, change original utc time to local time
allDataRaw = (oa.merge(rh,how = 'inner',left_index = True, right_index = True)
.merge(sr,how = 'inner',left_index = True, right_index = True)
.merge(ws,how = 'inner',left_index = True, right_index = True)
.merge(dp,how = 'inner',left_index = True, right_index = True)
.merge(st,how = 'inner',left_index = True, right_index = True)
.merge(at,how = 'inner',left_index = True, right_index = True));
import pytz
eastern = pytz.timezone('US/Eastern');
origIndex = allDataRaw.index;
newTimeIndex = origIndex.tz_localize(pytz.utc).tz_convert(eastern);
allDataRaw.index = newTimeIndex;
#add month, weekday, hour of day information
allDataRaw['month'] = allDataRaw.index;
allDataRaw['weekday'] = allDataRaw.index;
allDataRaw['hour'] = allDataRaw.index;
allDataRaw['month'] = (allDataRaw['month']
.apply(lambda x: x.month))
allDataRaw['weekday'] = (allDataRaw['weekday']
.apply(lambda x: x.weekday()))
allDataRaw['hour'] = (allDataRaw['hour']
.apply(lambda x: x.hour));
#separate one year for train, one year for test
train = allDataRaw.loc['2014-11-01 00:00:00':'2015-11-01 00:00:00',:];
test = allDataRaw.loc['2015-11-01 00:10:00':'2016-11-01 00:10:00'];
from sklearn import tree
from sklearn import svm
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import cross_val_predict
from sklearn import metrics
# perform linear regresson using all data, all year round
X_train = train.ix[:,0:6].as_matrix()
X_test = test.ix[:,0:6].as_matrix()
y_train = train.ix[:,6].as_matrix()
y_test = test.ix[:,6].as_matrix()
coef = np.dot(np.linalg.pinv(X_train),y_train)
score = pd.DataFrame()
score_train = 1- np.sum((y_train - np.dot(X_train,coef))**2)/np.sum((y_train-y_train.mean())**2)
score_test = 1- np.sum((y_test - np.dot(X_test,coef))**2)/np.sum((y_test-y_test.mean())**2)
score = score.append(pd.Series([score_train,score_test]),ignore_index=True)
score.columns = ['Train', 'Test']
score
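# Optional cross-check (a minimal sketch, not part of the original analysis): the pseudo-inverse
# fit above is ordinary least squares without an intercept term, so scikit-learn's
# LinearRegression with fit_intercept=False should reproduce the same R^2 scores.
from sklearn.linear_model import LinearRegression
lr = LinearRegression(fit_intercept=False).fit(X_train, y_train)
print('sklearn check - train R^2: %.4f, test R^2: %.4f' % (lr.score(X_train, y_train), lr.score(X_test, y_test)))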
# perform linear regression on all input, based on month
score = pd.DataFrame()
for i in range(1,13):
X_train = train[train.index.month==i].ix[:,0:6].as_matrix()
X_test = test[test.index.month==i].ix[:,0:6].as_matrix()
y_train = train[train.index.month==i].ix[:,6].as_matrix()
y_test = test[test.index.month==i].ix[:,6].as_matrix()
coef = np.dot(np.linalg.pinv(X_train),y_train)
score_train = 1- np.sum((y_train - np.dot(X_train,coef))**2)/np.sum((y_train-y_train.mean())**2)
score_test = 1- np.sum((y_test - np.dot(X_test,coef))**2)/np.sum((y_test-y_test.mean())**2)
score = score.append(pd.Series([score_train,score_test]),ignore_index=True)
score.columns = ['Train', 'Test']
score
# using outdoor temperature only as input to predict indoor air temperature, based on month
score = pd.DataFrame()
for i in range(1,13):
X_train = train[train.index.month==i].ix[:,0:1].as_matrix()
X_test = test[test.index.month==i].ix[:,0:1].as_matrix()
    y_train = train[train.index.month==i].ix[:,6].as_matrix()
    y_test = test[test.index.month==i].ix[:,6].as_matrix()
coef = np.dot(np.linalg.pinv(X_train),y_train)
score_train = 1- np.sum((y_train - np.dot(X_train,coef))**2)/np.sum((y_train-y_train.mean())**2)
score_test = 1- np.sum((y_test - np.dot(X_test,coef))**2)/np.sum((y_test-y_test.mean())**2)
score = score.append(pd.Series([score_train,score_test]),ignore_index=True)
score.columns = ['Train', 'Test']
score
# perform linear regression with temperature only, based on season (1-3, 4-6, 7-9, 10-12)
score = pd.DataFrame()
for i in range(4):
X_train = train[np.logical_and(train.index.month>i*3, train.index.month<i*3+4)].ix[:,0:1].as_matrix()
X_test = test[np.logical_and(test.index.month>i*3, test.index.month<i*3+4)].ix[:,0:1].as_matrix()
    y_train = train[np.logical_and(train.index.month>i*3, train.index.month<i*3+4)].ix[:,6].as_matrix()
    y_test = test[np.logical_and(test.index.month>i*3, test.index.month<i*3+4)].ix[:,6].as_matrix()
coef = np.dot(np.linalg.pinv(X_train),y_train)
score_train = 1- np.sum((y_train - np.dot(X_train,coef))**2)/np.sum((y_train-y_train.mean())**2)
score_test = 1- np.sum((y_test - np.dot(X_test,coef))**2)/np.sum((y_test-y_test.mean())**2)
score = score.append(pd.Series([score_train,score_test]),ignore_index=True)
score.columns = ['Train', 'Test']
score
# configure features and output
X_train = train.ix[:,0:6].copy()
X_test = test.ix[:,0:6].copy()
y_train = train.ix[:,6].copy()
y_test = test.ix[:,6].copy()
# train the regression tree
reg_tree = tree.DecisionTreeRegressor()
reg_tree.fit(X_train,y_train)
result = reg_tree.feature_importances_
# test the regression tree with test data
print('The test score is '+ str(reg_tree.score(X_test,y_test)))
# present the feature importance
feature_importance = pd.DataFrame()
feature_importance = feature_importance.append(pd.Series(result),ignore_index=True)
feature_importance.columns = ['oa', 'rh', 'sr', 'ws', 'dp', 'st']
feature_importance.index = ['feature importance']
feature_importance
fig,ax = plt.subplots(facecolor='w')
ax.scatter(reg_tree.predict(X_test),y_test, color='b',label='Decision Regression Tree Using All Features')
ax.set_xlabel('Predicted value for Test Data')
ax.set_ylabel('True value for Test Data')
ax.legend()
plt.show()
fig,ax = plt.subplots(facecolor='w')
num = len(test[test.index.month==1].index)
x = np.linspace(1,num,num)
ax.plot(x,test[test.index.month==1]['at'],color='r',label='True Value')
ax.plot(x,reg_tree.predict(X_test[X_test.index.month==1]),color='b',label='Predicted Value')
ax.legend()
ax.set_xlabel('2016-01-01 to 2016-01-31')
ax.set_ylabel('Indoor Air Temperature (F)')
plt.show()
# use only temperature to train
# configure features and output
X_train = train.ix[:,0:1].copy()
X_test = test.ix[:,0:1].copy()
y_train = train.ix[:,6].copy()
y_test = test.ix[:,6].copy()
# train the regression tree
reg_tree = tree.DecisionTreeRegressor()
reg_tree.fit(X_train,y_train)
result = reg_tree.feature_importances_
# test the regression tree with test data
print('The test score is '+ '%.4f'% reg_tree.score(X_test,y_test))
# use data from a particular month to train
feature_importance = pd.DataFrame()
for i in range(1,13):
    # configure features and output
X_train = train[train.index.month==i].ix[:,0:6].copy()
X_test = test[test.index.month==i].ix[:,0:6].copy()
y_train = train[train.index.month==i].ix[:,6].copy()
y_test = test[test.index.month==i].ix[:,6].copy()
# train the regression tree
reg_tree = tree.DecisionTreeRegressor()
reg_tree.fit(X_train,y_train)
result = reg_tree.feature_importances_
# test the regression tree with test data
print('The test score of month ' +str(i)+' is '+ '%.4f'% reg_tree.score(X_train,y_train)+' using training data, '+'%.4f'% reg_tree.score(X_test,y_test)+' using testing data.')
    # present the feature importance
feature_importance = feature_importance.append(pd.Series(result),ignore_index=True)
feature_importance.columns = ['oa', 'rh', 'sr', 'ws', 'dp', 'st']
#feature_importance
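# Optional follow-up (a sketch; max_depth=5 is an arbitrary illustrative choice): the unpruned
# trees above fit their training slices perfectly, so a depth-limited tree on the full year of
# data gives a quick sense of how much of that is overfitting.
pruned_tree = tree.DecisionTreeRegressor(max_depth=5)
pruned_tree.fit(train.ix[:, 0:6], train.ix[:, 6])
print('Depth-limited tree test score: ' + '%.4f' % pruned_tree.score(test.ix[:, 0:6], test.ix[:, 6]))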
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now that a network is defined, we can create a GenericPhase object associated with it. For this demo we'll make an oil phase, so let's call it oil
Step2: As can be seen in the above printout, this phase has a temperature and pressure set at all locations, but has no other physical properties.
Step3: As can be seen, this puts the value of 100.0 g/mol in every pore. Note that you could also assign each pore explicitly with a numpy array. OpenPNM automatically assigns a scalar value to every location as shown above.
Step4: You can also specify something like viscosity this way as well, but it's not recommended
Step5: The problem with specifying the viscosity as a hard-coded value is that viscosity is a function of temperature (among other things), so if we adjust the temperature on the oil object it will have no effect on the hard-coded viscosity
Step6: The correct way to specify something like viscosity is to use pore-scale models. There is a large library of pre-written models in the openpnm.models submodule. For instance, a polynomial can be used as follows
Step7: We can now see that our previously written values of viscosity (1600.0) have been overwritten by the values coming from the model
Step8: And moreover, if we change the temperature the model will update the viscosity values
Step9: Note the call to regenerate_models, which is necessary to actually re-run the model using the new temperature.
Step10: We can reach into this dictionary and alter the parameters of the model if necessary
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import openpnm as op
pn = op.network.Cubic(shape=[3, 3, 3], spacing=1e-4)
print(pn)
oil = op.phases.GenericPhase(network=pn)
print(oil)
oil['pore.molecular_mass'] = 100.0 # g/mol
print(oil['pore.molecular_mass'])
oil['pore.molecular_mass'] = np.ones(shape=[pn.Np, ])*120.0
print(oil['pore.molecular_mass'])
oil['pore.viscosity'] = 1600.0 # cP
oil['pore.temperature'] = 100.0 # C
print(oil['pore.viscosity'])
mod = op.models.misc.polynomial
oil.add_model(propname='pore.viscosity', model=mod,
a=[1600, 12, -0.05], prop='pore.temperature')
print(oil['pore.viscosity'])
oil['pore.temperature'] = 40.0 # C
oil.regenerate_models()
print(oil['pore.viscosity'])
print(oil.models)
oil.models['pore.viscosity']['a'] = [1200, 10, -0.02]
oil.regenerate_models()
print(oil['pore.viscosity'])
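# A further sketch (the coefficients here are made up purely for illustration): the same
# polynomial model can drive any other temperature-dependent property, e.g. a rough density.
oil.add_model(propname='pore.density', model=mod,
              a=[900, -0.5], prop='pore.temperature')
print(oil['pore.density'])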
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using Yellowbrick to Explore Book Reviews
Step2: Visualizing Stopwords Removal
Step3: Visualizing tokens across corpora
Step4: t-SNE
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets.base import Bunch
## The path to the test data sets
FIXTURES = os.path.join(os.getcwd(), "data")
## Dataset loading mechanisms
datasets = {
"reviews": os.path.join(FIXTURES, "reviews")
}
def load_data(name, download=True):
Loads and wrangles the passed in text corpus by name.
If download is specified, this method will download any missing files.
# Get the path from the datasets
path = datasets[name]
# Read the files in the directory as the categories.
categories = [
os.path.splitext(f)[0] for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))
and os.path.join(path, f).endswith(".csv")
]
files = [] # holds the file names relative to the root
data = [] # holds the text read from the file
target = [] # holds the string of the category
# Load the data from the files in the corpus
for cat in categories:
files.append(os.path.join(path, cat + '.csv'))
with open(os.path.join(path, cat + '.csv'), 'r') as f:
content = f.read()
docs = [s.strip() for s in content.splitlines()]
for doc in docs[:1000]: # limited size so nb won't crash
data.append(doc)
target.append(cat)
# Return the data bunch for use similar to the newsgroups example
return Bunch(
categories=categories,
files=files,
data=data,
target=target,
)
corpus = load_data('reviews')
from yellowbrick.text.freqdist import FreqDistVisualizer
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
docs = vectorizer.fit_transform(corpus.data)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer()
visualizer.fit(docs, features)
visualizer.show()
vectorizer = CountVectorizer(stop_words='english')
docs = vectorizer.fit_transform(corpus.data)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer()
visualizer.fit(docs, features)
visualizer.show()
male = ['Andy-Weir-The-Martian',
'John-Green-The-Fault-in-our-Stars']
female = ['Laura-Hillenbrand-Unbroken',
'Paula_Hawkins-The-Girl-On-The-Train',
'Suzanne-Collins-The-Hunger-Games',
'Donna-Tartt-The-Goldfinch',
'EL-James-Fifty-Shades-of-Grey',
'Fillian_Flynn-Gone_Girl']
male_author_reviews = []
female_author_reviews = []
for book in male:
for idx in range(len(corpus.data)):
if corpus.target[idx] == book:
male_author_reviews.append(corpus.data[idx])
for book in female:
for idx in range(len(corpus.data)):
if corpus.target[idx] == book:
female_author_reviews.append(corpus.data[idx])
vectorizer = CountVectorizer(stop_words='english')
docs = vectorizer.fit_transform(text for text in female_author_reviews)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer()
visualizer.fit(docs, features)
visualizer.show()
vectorizer = CountVectorizer(stop_words='english')
docs = vectorizer.fit_transform(text for text in male_author_reviews)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer()
visualizer.fit(docs, features)
visualizer.show()
from yellowbrick.text import TSNEVisualizer
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
docs = tfidf.fit_transform(corpus.data)
labels = corpus.target
# Create the visualizer and draw the vectors
tsne = TSNEVisualizer()
tsne.fit(docs, labels)
tsne.show()
# Only visualize the books by female authors
tsne = TSNEVisualizer(classes=female)
tsne.fit(docs, labels)
tsne.show()
# Only visualize the books by male authors
tsne = TSNEVisualizer(classes=male)
tsne.fit(docs, labels)
tsne.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic training loop
Step2: Solving a machine learning problem
Step3: Tensors are usually gathered together in batches, or groups of inputs and targets stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. Given how small this dataset is, the entire dataset can be treated as a single batch.
Step4: The initial variables are set here in a fixed way, but Keras comes with a number of initializers you could use, with or without the rest of Keras.
Step5: Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue.
Step6: Define a training loop
Step7: To watch training, send the same batch of x and y through the training loop and see how W and b evolve.
Step8: The same solution, but with Keras
Step9: Instead of writing a new training loop each time you create a model, you can use Keras's built-in features as a shortcut. This can be useful when you do not want to write or debug Python training loops.
Step10: Keras fit expects batched data or a complete dataset as a NumPy array. NumPy arrays are split into batches and default to a batch size of 32.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
# The actual line
TRUE_W = 3.0
TRUE_B = 2.0
NUM_EXAMPLES = 1000
# A vector of random x values
x = tf.random.normal(shape=[NUM_EXAMPLES])
# Generate some noise
noise = tf.random.normal(shape=[NUM_EXAMPLES])
# Calculate y
y = x * TRUE_W + TRUE_B + noise
# Plot all the data
import matplotlib.pyplot as plt
plt.scatter(x, y, c="b")
plt.show()
class MyModel(tf.Module):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Initialize the weights to `5.0` and the bias to `0.0`
# In practice, these should be randomly initialized
self.w = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def __call__(self, x):
return self.w * x + self.b
model = MyModel()
# List the variables tf.modules's built-in variable aggregation.
print("Variables:", model.variables)
# Verify the model works
assert model(3.0).numpy() == 15.0
# This computes a single loss value for an entire batch
def loss(target_y, predicted_y):
return tf.reduce_mean(tf.square(target_y - predicted_y))
plt.scatter(x, y, c="b")
plt.scatter(x, model(x), c="r")
plt.show()
print("Current loss: %1.6f" % loss(y, model(x)).numpy())
# Given a callable model, inputs, outputs, and a learning rate...
def train(model, x, y, learning_rate):
with tf.GradientTape() as t:
# Trainable variables are automatically tracked by GradientTape
current_loss = loss(y, model(x))
# Use GradientTape to calculate the gradients with respect to W and b
dw, db = t.gradient(current_loss, [model.w, model.b])
# Subtract the gradient scaled by the learning rate
model.w.assign_sub(learning_rate * dw)
model.b.assign_sub(learning_rate * db)
model = MyModel()
# Collect the history of W-values and b-values to plot later
Ws, bs = [], []
epochs = range(10)
# Define a training loop
def training_loop(model, x, y):
for epoch in epochs:
# Update the model with the single giant batch
train(model, x, y, learning_rate=0.1)
# Track this before I update
Ws.append(model.w.numpy())
bs.append(model.b.numpy())
current_loss = loss(y, model(x))
print("Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f" %
(epoch, Ws[-1], bs[-1], current_loss))
print("Starting: W=%1.2f b=%1.2f, loss=%2.5f" %
(model.w, model.b, loss(y, model(x))))
# Do the training
training_loop(model, x, y)
# Plot it
plt.plot(epochs, Ws, "r",
epochs, bs, "b")
plt.plot([TRUE_W] * len(epochs), "r--",
[TRUE_B] * len(epochs), "b--")
plt.legend(["W", "b", "True W", "True b"])
plt.show()
# Visualize how the trained model performs
plt.scatter(x, y, c="b")
plt.scatter(x, model(x), c="r")
plt.show()
print("Current loss: %1.6f" % loss(model(x), y).numpy())
class MyModelKeras(tf.keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Initialize the weights to `5.0` and the bias to `0.0`
# In practice, these should be randomly initialized
self.w = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def call(self, x):
return self.w * x + self.b
keras_model = MyModelKeras()
# Reuse the training loop with a Keras model
training_loop(keras_model, x, y)
# You can also save a checkpoint using Keras's built-in support
keras_model.save_weights("my_checkpoint")
keras_model = MyModelKeras()
# compile sets the training parameters
keras_model.compile(
# By default, fit() uses tf.function(). You can
# turn that off for debugging, but it is on now.
run_eagerly=False,
# Using a built-in optimizer, configuring as an object
optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
# Keras comes with built-in MSE error
# However, you could use the loss function
# defined above
loss=tf.keras.losses.mean_squared_error,
)
print(x.shape[0])
keras_model.fit(x, y, epochs=10, batch_size=1000)
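# Quick check: compare the learned parameters against the values used to generate the data.
print("Learned W = %1.2f (true %1.2f), b = %1.2f (true %1.2f)" %
      (keras_model.w.numpy(), TRUE_W, keras_model.b.numpy(), TRUE_B))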
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
Step2: Keras supports different Merge strategies
Step3: Here we insert the auxiliary loss, allowing the LSTM and Embedding layer to be trained smoothly even though the main loss will be much higher in the model.
Step4: At this point, we feed into the model our auxiliary input data by concatenating it with the LSTM output
Step5: Model Definition
Step6: We compile the model and assign a weight of 0.2 to the auxiliary loss.
|
<ASSISTANT_TASK:>
Python Code:
# let's load MNIST data as we did in the exercise on MNIST with FC Nets
# %load ../solutions/sol_821.py
## try yourself
## `evaluate` the model on test data
from keras.layers import Input, Embedding, LSTM, Dense
from keras.models import Model
# Headline input: meant to receive sequences of 100 integers, between 1 and 10000.
# Note that we can name any layer by passing it a "name" argument.
main_input = Input(shape=(100,), dtype='int32', name='main_input')
# This embedding layer will encode the input sequence
# into a sequence of dense 512-dimensional vectors.
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)
# A LSTM will transform the vector sequence into a single vector,
# containing information about the entire sequence
lstm_out = LSTM(32)(x)
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
from keras.layers import concatenate
auxiliary_input = Input(shape=(5,), name='aux_input')
x = concatenate([lstm_out, auxiliary_input])
# We stack a deep densely-connected network on top
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
# And finally we add the main logistic regression layer
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output, auxiliary_output])
model.compile(optimizer='rmsprop',
loss={'main_output': 'binary_crossentropy', 'aux_output': 'binary_crossentropy'},
loss_weights={'main_output': 1., 'aux_output': 0.2})
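# A minimal smoke test with random dummy data (shapes only -- this is not a real dataset):
import numpy as np
headline_data = np.random.randint(1, 10000, size=(32, 100))
additional_data = np.random.random((32, 5))
labels = np.random.randint(0, 2, size=(32, 1))
model.fit({'main_input': headline_data, 'aux_input': additional_data},
          {'main_output': labels, 'aux_output': labels},
          epochs=1, batch_size=32)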
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Magics!
Step3: Numpy
Step4: Matplotlib and Numpy
|
<ASSISTANT_TASK:>
Python Code:
names = ['alice', 'jonathan', 'bobby']
ages = [24, 32, 45]
ranks = ['kinda cool', 'really cool', 'insanely cool']
for (name, age, rank) in zip(names, ages, ranks):
print(name, age, rank)
for index, (name, age, rank) in enumerate(zip(names, ages, ranks)):
print(index, name, age, rank)
# return, esc, shift+enter, ctrl+enter
# text keyboard shortcuts -- cmd > (right), < left,
# option delete (deletes words)
# keyboard shortcuts
# - a, b, y, m, dd, h, ctrl+shift+-
%matplotlib inline
%config InlineBackend.figure_format='retina'
import matplotlib.pyplot as plt
# no pylab
import seaborn as sns
sns.set_context('talk')
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import numpy as np
# don't do `from numpy import *`
import pandas as pd
# If you have a specific function that you'd like to import
from numpy.random import randn
x = np.arange(100)
y = np.sin(x)
plt.plot(x, y);
%matplotlib notebook
x = np.arange(10)
y = np.sin(x)
plt.plot(x, y)#;
%%bash
for num in {1..5}
do
for infile in *;
do
echo $num $infile
done
wc $infile
done
print("hi")
!pwd
!ping google.com
this_is_magic = "Can you believe you can pass variables and strings like this?"
!echo $this_is_magic
hey
x = np.arange(10000)
print(x) # smart printing
print(x[0]) # first element
print(x[-1]) # last element
print(x[0:5]) # first 5 elements (also x[:5])
print(x[:]) # "Everything"
print(x[-5:]) # last five elements
print(x[-5:-2])
print(x[-5:-1]) # not final value -- not inclusive on right
x = np.random.randint(5, 5000, (3, 5))
x
np.sum(x)
x.sum()
np.sum(x)
np.sum(x, axis=0)
np.sum(x, axis=1)
x.sum(axis=1)
# Multi dimension array slice with a comma
x[:, 2]
y = np.linspace(10, 20, 11)
y
np.linspace?
np.linspace()
# shift-tab; shift-tab-tab
np.
def does_it(first=x, second=y):
This is my doc
pass
y[[3, 5, 7]]
does_it()
num = 3000
x = np.linspace(1.0, 300.0, num)
y = np.random.rand(num)
z = np.sin(x)
np.savetxt("example.txt", np.transpose((x, y, z)))
%less example.txt
!wc example.txt
!head example.txt
#Not a good idea
a = []
b = []
for line in open("example.txt", 'r'):
a.append(line[0])
b.append(line[2])
a[:10] # Whoops!
a = []
b = []
for line in open("example.txt", 'r'):
line = line.split()
a.append(line[0])
b.append(line[2])
a[:10] # Strings!
a = []
b = []
for line in open("example.txt", 'r'):
line = line.split()
a.append(float(line[0]))
b.append(float(line[2]))
a[:10] # Lists!
# Do this!
a, b = np.loadtxt("example.txt", unpack=True, usecols=(0,2))
a
from numpy.random import randn
num = 50
x = np.linspace(2.5, 300, num)
y = randn(num)
plt.scatter(x, y)
y > 1
y[y > 1]
y[(y < 1) & (y > -1)]
plt.scatter(x, y, c='b', s=50)
plt.scatter(x[(y < 1) & (y > -1)], y[(y < 1) & (y > -1)], c='r', s=50)
y[~((y < 1) & (y > -1))] = 1.0
plt.scatter(x, y, c='b')
plt.scatter(x, np.clip(y, -0.5, 0.5), color='red')
num = 350
slope = 0.3
x = randn(num) * 50. + 150.0
y = randn(num) * 5 + x * slope
plt.scatter(x, y, c='b')
# plt.scatter(x[(y < 1) & (y > -1)], y[(y < 1) & (y > -1)], c='r')
# np.argsort, np.sort, complicated index slicing
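# Small sketch of the hint above: np.argsort returns the indices that would sort x,
# so both arrays can be reordered consistently before a line plot.
order = np.argsort(x)
plt.plot(x[order], y[order]);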
dframe = pd.DataFrame({'x': x, 'y': y})
g = sns.jointplot('x', 'y', data=dframe, kind="reg")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Asset prices have time-varying volatility (variance of day over day returns). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
Step2: Specifying the model in pymc3 mirrors its statistical specification.
Step3: Fit Model
Step4: Looking at the returns over time and overlaying the estimated standard deviation we can see how the model tracks the volatility over time.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pymc3 as pm
from pymc3.distributions.timeseries import GaussianRandomWalk
from scipy import optimize
%pylab inline
n = 400
returns = np.genfromtxt("../data/SP500.csv")[-n:]
returns[:5]
plt.plot(returns)
model = pm.Model()
with model:
sigma = pm.Exponential('sigma', 1./.02, testval=.1)
nu = pm.Exponential('nu', 1./10)
s = GaussianRandomWalk('s', sigma**-2, shape=n)
r = pm.StudentT('r', nu, lam=pm.math.exp(-2*s), observed=returns)
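# The generative model above, roughly: sigma ~ Exponential(rate=50), nu ~ Exponential(rate=0.1),
# s_i ~ Normal(s_{i-1}, sigma**2) (a Gaussian random walk over log-volatility), and
# r_i ~ StudentT(nu) with precision lam = exp(-2*s_i), i.e. scale exp(s_i).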
with model:
trace = pm.sample(2000)
figsize(12,6)
pm.traceplot(trace, model.vars[:-1]);
figsize(12,6)
title(str(s))
plot(trace[s][::10].T,'b', alpha=.03);
xlabel('time')
ylabel('log volatility')
plot(np.abs(returns))
plot(np.exp(trace[s][::10].T), 'r', alpha=.03);
sd = np.exp(trace[s].T)
xlabel('time')
ylabel('absolute returns')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoder Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step40: Batch and pad the source and target sequences
Step43: Train
Step45: Save Parameters
Step47: Checkpoint
Step50: Sentence to Sequence
Step52: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
source_ids = [[source_vocab_to_int[word] for word in line.split()] for line in source_text.split('\n')]
target_ids = [[target_vocab_to_int[word] for word in (line + ' <EOS>').split()] for line in target_text.split('\n')]
#print('source text: \n', source_words[:50], '\n\n')
#print('target text: \n', target_words[:50], '\n\n')
return source_ids, target_ids
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
import problem_unittests as tests
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, shape = (None, None), name = 'input')
targets = tf.placeholder(tf.int32, shape = (None, None), name = 'targets')
learning_rate = tf.placeholder(tf.float32, name = 'learning_rate')
keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')
tgt_seq_length = tf.placeholder(tf.int32, (None,), name = 'target_sequence_length')
src_seq_length = tf.placeholder(tf.int32, (None,), name = 'source_sequence_length')
max_tgt_seq_length = tf.reduce_max(tgt_seq_length, name = 'max_target_sequence_length')
return inputs, targets, learning_rate, keep_prob, tgt_seq_length, max_tgt_seq_length, src_seq_length
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
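    # Drop the last token of every target sequence, then prepend the <GO> id so the
    # decoder is fed the target sequence shifted right by one step.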
ending = tf.strided_slice(target_data, [0,0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_encoding_input(process_decoder_input)
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
# TODO: Implement Function
enc_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs,
source_vocab_size,
encoding_embedding_size)
# RNN cell
def make_cell(rnn_size):
enc_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer = tf.random_uniform_initializer(-0.1, 0.1, seed = 2))
return enc_cell
enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
enc_cell = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob = keep_prob)
enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, sequence_length = source_sequence_length, dtype = tf.float32)
return enc_output, enc_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
# TODO: Implement Function
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs = dec_embed_input,
sequence_length = target_sequence_length,
time_major = False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(cell = dec_cell,
helper = training_helper,
initial_state = encoder_state,
output_layer = output_layer)
dec_outputs = tf.contrib.seq2seq.dynamic_decode(decoder = training_decoder,
impute_finished = True,
maximum_iterations = max_summary_length)[0]
#train_logits = output_layer(dec_outputs)
return dec_outputs
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
# TODO: Implement Function
# tile the start tokens for inference helper
start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype = tf.int32), [batch_size], name = 'start_tokens')
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding = dec_embeddings,
start_tokens = start_tokens,
end_token = end_of_sequence_id)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(cell = dec_cell,
helper = inference_helper,
initial_state = encoder_state,
output_layer = output_layer)
decoder_outputs = tf.contrib.seq2seq.dynamic_decode(decoder = inference_decoder,
impute_finished = True,
maximum_iterations = max_target_sequence_length)[0]
return decoder_outputs
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
# TODO: Implement Function
# Embed the target sequences
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
# Construct decoder LSTM cell
def make_cell(rnn_size):
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer = tf.random_uniform_initializer(-0.1, 0.1, seed = 2))
return cell
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
# Create output layer
output_layer = Dense(target_vocab_size, kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev = 0.1))
# Use decoding_layer_train to get training logits
with tf.variable_scope("decode"):
train_logits = decoding_layer_train(encoder_state = encoder_state,
dec_cell = dec_cell,
dec_embed_input = dec_embed_input,
target_sequence_length = target_sequence_length,
max_summary_length = max_target_sequence_length,
output_layer = output_layer,
keep_prob = keep_prob)
# end with
# Use decoding_layer_infer to get logits at inference time
with tf.variable_scope("decode", reuse = True):
inference_logits = decoding_layer_infer(encoder_state = encoder_state,
dec_cell = dec_cell,
dec_embeddings = dec_embeddings,
start_of_sequence_id = target_vocab_to_int['<GO>'],
end_of_sequence_id = target_vocab_to_int['<EOS>'],
max_target_sequence_length = max_target_sequence_length,
vocab_size = target_vocab_size,
output_layer = output_layer,
batch_size = batch_size,
keep_prob = keep_prob)
# end with
return train_logits, inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
# TODO: Implement Function
# Encode input using encoding_layer
_, enc_state = encoding_layer( rnn_inputs = input_data,
rnn_size = rnn_size,
num_layers = num_layers,
keep_prob = keep_prob,
source_sequence_length = source_sequence_length,
source_vocab_size = source_vocab_size,
encoding_embedding_size = enc_embedding_size)
# Process target data using process_decoder_input
dec_input = process_decoder_input(target_data = target_data,
target_vocab_to_int = target_vocab_to_int,
batch_size = batch_size)
# decode the encoded input using decoding_layer
dec_output_train, dec_output_infer = decoding_layer( dec_input = dec_input,
encoder_state = enc_state,
target_sequence_length = target_sequence_length,
max_target_sequence_length = max_target_sentence_length,
rnn_size = rnn_size,
num_layers = num_layers,
target_vocab_to_int = target_vocab_to_int,
target_vocab_size = target_vocab_size,
batch_size = batch_size,
keep_prob = keep_prob,
decoding_embedding_size = dec_embedding_size)
return dec_output_train, dec_output_infer
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 20
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 64
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 256
decoding_embedding_size = 256
# Learning Rate
learning_rate = 0.01
# Dropout Keep Probability
keep_probability = 0.4
display_step = 64
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
def pad_sentence_batch(sentence_batch, pad_int):
Pad sentences with <PAD> so that each sentence of a batch has the same length
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
Batch targets, sources, and the lengths of their sentences together
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
DON'T MODIFY ANYTHING IN THIS CELL
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
# TODO: Implement Function
# convert to lowercase
sentence = sentence.lower()
# convert words to ids, using vocab_to_int
sequence = [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.split()]
return sequence
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: You would like a classifier to separate the blue dots from the red dots.
Step4: 2 - Zero initialization
Step5: Expected Output
Step6: The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary
Step8: The model is predicting 0 for every example.
Step9: Expected Output
Step10: If you see "inf" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
Step12: Observations
Step13: Expected Output
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l - 1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
    np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*10
parameters['b' + str(l)] = np.zeros((layers_dims[l],1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
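        # He initialization: scale a standard normal by sqrt(2 / fan_in), which keeps the
        # variance of ReLU activations roughly constant across layers (He et al., 2015).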
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*np.sqrt(2/layers_dims[l-1])
parameters['b' + str(l)] = np.zeros((layers_dims[l],1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In today's lab, we're going to use logistic regression to classify handwritten digits. You'll learn about logistic / softmax regression and TensorFlow, a popular machine learning library developed by Google.
Step2: Here are the dimensions of the data. You'll see that TensorFlow has already split the dataset into training, validation, and test sets.
Step4: Each training example is originally a 28x28 image
Step5: Question 1
 Step6: Notice that there are more than 2 labels (0 through 9), and the label data are represented in a one-hot encoding. So, the labels have dimension n x 10. This is different from what we've done before, but it is a typical strategy for multiclass classification. We will see how our softmax loss function incorporates 10-dimensional labels.
Step7: Question 3
Step8: Question 4
Step9: y is a variable now. Its value will be determined by the inputs x and parameters theta and b.
Step10: We'll call cross_entropy the loss function, but as a Python object it's just another TensorFlow variable. Its value is a scalar, the number we'd like to minimize by choosing theta and b.
Step11: Train it!
Step12: How did we do?
Step13: Not bad! Let's see some examples of your predictions
Step14: Chances are some of your incorrect predictions are hard for you to guess, too!
|
<ASSISTANT_TASK:>
Python Code:
!pip install tensorflow
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import math
!pip install -U okpy
from client.api.notebook import Notebook
ok = Notebook('lab12.ok')
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
mnist.train.images.shape, mnist.validation.images.shape, mnist.test.images.shape
def example_to_image(example):
'''Takes in a length-784 training example and returns a (28, 28) image.'''
return example.reshape((28, 28))
def show_images(images, ncols=2, figsize=(10, 7), **kwargs):
Shows one or more images.
images: Image or list of images.
def show_image(image, axis=plt):
plt.imshow(image, **kwargs)
if not (isinstance(images, list) or isinstance(images, tuple)):
images = [images]
nrows = math.ceil(len(images) / ncols)
ncols = min(len(images), ncols)
plt.figure(figsize=figsize)
for i, image in enumerate(images):
axis = plt.subplot2grid(
(nrows, ncols),
(i // ncols, i % ncols),
)
axis.tick_params(bottom='off', left='off', top='off', right='off',
labelleft='off', labelbottom='off')
axis.grid(False)
show_image(image, axis)
# These indices are the examples you should show from mnist.train.images
examples_to_show = np.array([0, 5100, 10200, 15300, 20400, 25500, 30600, 35700, 40800, 45900])
# Get the examples from the training data
examples = ...
# Convert each example into an image
images = ...
# Call show_images using ncols=5
...
# We'll print the labels for each of these examples
mnist.train.labels[examples_to_show]
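# Added illustration: the labels above are one-hot encoded, so the digit class of each
# example can be recovered with an argmax along axis 1.
print(np.argmax(mnist.train.labels[examples_to_show], axis=1))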
x = ...
y_ = ...
theta = ...
b = ...
y = ...
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
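# Editor's note: the line above is the average softmax cross-entropy,
# -(1/N) * sum_n sum_k y_[n, k] * log(y[n, k]). TensorFlow also provides
# tf.nn.softmax_cross_entropy_with_logits as a numerically more stable alternative;
# it is mentioned here only for reference, the lab keeps the explicit formula.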
train_step = ...
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:")
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
EXAMPLES_TO_SHOW = 10
corrects = sess.run(correct_prediction, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
correct_i = np.where(corrects)[0][:EXAMPLES_TO_SHOW]
print("Correct predictions:")
correct_ex = mnist.test.images[correct_i]
correct_images = [example_to_image(example) for example in correct_ex]
show_images(correct_images, 5)
incorrect_i = np.where(~corrects)[0][:EXAMPLES_TO_SHOW]
print("Incorrect predictions:")
incorrect_ex = mnist.test.images[incorrect_i]
incorrect_images = [example_to_image(example) for example in incorrect_ex]
show_images(incorrect_images, 5)
print("You predicted:")
print(sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})[incorrect_i])
i_finished_the_lab = False
_ = ok.grade('qcompleted')
_ = ok.backup()
_ = ok.submit()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
 Step1: Now you can use the .groupby() method to group rows together based off of a column name. For instance let's group based off of Company. This will create a DataFrameGroupBy object
Step2: You can save this object as a new variable
Step3: And then call aggregate methods off the object
Step4: More examples of aggregate methods
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
# Create dataframe
data = {'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],
'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'],
'Sales':[200,120,340,124,243,350]}
df = pd.DataFrame(data)
df
df.groupby('Company')
by_comp = df.groupby("Company")
by_comp.mean()
df.groupby('Company').mean()
by_comp.std()
by_comp.min()
by_comp.max()
by_comp.count()
by_comp.describe()
by_comp.describe().transpose()
by_comp.describe().transpose()['GOOG']
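# Added illustration (not in the original notebook): .agg computes several statistics
# per group in a single call.
by_comp['Sales'].agg(['mean', 'min', 'max'])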
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-3', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_boston
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import scale
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error
from sklearn.cross_validation import KFold
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
boston = load_boston()
print boston.DESCR
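# Added sketch (assumption: the imports above suggest the intended workflow is a scaled
# train/test split followed by a KNN regression; the split ratio and k below are
# illustrative defaults, not values from the original notebook).
X_train, X_test, y_train, y_test = train_test_split(
    scale(boston.data), boston.target, test_size=0.3, random_state=0)
knn = KNeighborsRegressor(n_neighbors=5).fit(X_train, y_train)
print(mean_squared_error(y_test, knn.predict(X_test)))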
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each ray (starting from origin O(0,0)) crosses successive turnings of the spiral at constant distance points, namely at distance=$2\pi a$.
Step2: Read the json file created from data posted at wtatennis.com
Step3: played_at is the list of tournaments Simona Halep participated in
Step4: The arcs of spiral are defined as Plotly SVG paths
Step5: The function make_bar returns a Plotly dict that will be used to generate the bar shapes
Step6: Define a function setting the plot layout
Step7: The bar charts corresponding to two consecutive matches in a tournament are separated by an arc of length interM,
Step8: The bars are colored by the following rule
Step9: Get data for generating bars and data to be displayed when hovering the mouse over the plot
Step10: Check list lengths
Step11: Define the list of strings to be displayed for each bar
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
PI=np.pi
a=2
theta=np.linspace(3*PI/2, 8*PI, 400)
z=a*theta*np.exp(-1j*theta)
plt.figure(figsize=(6,6))
plt.plot(z.real, z.imag)
plt.axis('equal')
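# Editor's note: along any ray from the origin, successive turnings of the spiral satisfy
# r(theta + 2*pi) - r(theta) = a*(theta + 2*pi) - a*theta = 2*pi*a, so consecutive
# crossings are indeed a constant distance 2*pi*a apart.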
h=7.0
score={0: 0., 1:10./h, 2: 20/h, 3: 30/h, 4: 40/h, 5: 50/h, 6: 60/h, 7: 70/h}
score[6]
import plotly.plotly as py
from plotly.graph_objs import *
import json
with open("halep2015.json") as json_file:
jdata = json.load(json_file)
print jdata['Shenzen']
played_at=['Shenzen', 'Australian Open', 'Fed Cup', 'Dubai', 'Indiana Wells', 'Miami',
'Stuttgart', 'Madrid', 'Rome', 'French Open', 'Birmingham', 'Wimbledon', 'Toronto',
'Cincinnati', 'US Open', 'Guangzhou', 'Wuhan', 'Beijing', 'WTA Finals' ]
#define a dict giving the number of matches played by Halep in each tournament k
nmatches={ k: len(jdata[where][3:]) for (k, where) in enumerate(played_at) }
def make_arc(aa, theta0, theta1, dist, nr=4):# defines the arc of spiral between theta0 and theta1,
theta=np.linspace(theta0, theta1, nr)
pts=(aa*theta+dist)*np.exp(-1j*theta)# points on spiral arc r=aa*theta
string_arc='M '
for k in range(len(theta)):
string_arc+=str(pts.real[k])+', '+str(pts.imag[k])+' L '
return string_arc
make_arc(0.2, PI+0.2, PI, 4)[1:]
def make_bar(bar_height, theta0, fill_color, rad=0.2, a=2):
theta1=theta0+rad
C=(a*theta1+bar_height)*np.exp(-1j*theta1)
D=a*theta0*np.exp(-1j*theta0)
return dict(
line=Line(color=fill_color, width=0.5
),
path= make_arc(a, theta0, theta0+rad, 0.0)+str(C.real)+', '+str(C.imag)+' '+\
make_arc(a, theta1, theta0, bar_height)[1:]+ str(D.real)+', '+str(D.imag),
type='path',
fillcolor=fill_color
)
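# Illustrative usage (added; the height, angle and color are arbitrary placeholders): dicts
# like example_bar are what get appended to layout['shapes'] in the plotting loop below.
example_bar = make_bar(3.0, PI, '#dc3148', rad=0.2, a=2)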
def make_layout(title, plot_size):
axis=dict(showline=False, # hide axis line, grid, ticklabels and title
zeroline=False,
showgrid=False,
showticklabels=False,
title=''
)
return Layout(title=title,
font=Font(size=12),
xaxis=XAxis(axis),
yaxis=YAxis(axis),
showlegend=False,
width=plot_size,
height=plot_size,
margin=Margin(t=30, b=30, l=30, r=30),
hovermode='closest',
shapes=[]# below we append to shapes the dicts defining
#the bars associated to set scores
)
title='Simona Halep 2015 Tournament Results<br>Each arc of spiral corresponds to a tournament'
layout=make_layout(title, 700)
interM=2.0#the length of circle arc approximating an arc of spiral, between two consecutive matches
interT=3.5# between two tournaments
colors=['#dc3148','#864d7f','#9e70a2', '#caaac2','#d6c7dd', '#e6e1dd']
a=2.0 # the parameter in spiral equation z(theta)=a*theta exp(-i theta)
theta0=3*PI/2 # the starting point of the spiral
Theta=[]# the list of tournament arc ends
Opponent=[]# the list of opponents in each set of all matches played by halep
rankOp=[]# rank of opponent list
middleBar=[]# theta coordinate for the middle point of each bar base
half_h=[]# the list of bar heights/2
wb=1.5# bar width along the spiral
rad=wb/(a*theta0)#the angle in radians corresponding to an arc of length wb,
#within the circle of radius a*theta
rank_Halep=[]
Halep_set_sc=[]# list of Halep set scores
Opponent_set_sc=[]# list of opponent set scores
bar_colors=[]# the list of colors assigned to each bar in bar charts
for k, where in enumerate(played_at):
nr=nmatches[k]# nr is the number of matches played by Halep in the k^th tournament
Theta.append(theta0)
for match in range(nr):
player=jdata[where][3+match].keys()[0]# opponent name in match match
rankOp.append(int(player.partition('(')[2].partition(')')[0]))#Extract opponent rank:
set_sc=jdata[where][3+match].values()[0]#set scores in match match
sets=len(set_sc)
#set bar colors according to opponent rank
if rankOp[-1] in range(1,11): col=colors[1]
elif rankOp[-1] in range(11, 21): col=colors[2]
elif rankOp[-1] in range(21, 51): col=colors[3]
elif rankOp[-1] in range(51, 101): col=colors[4]
else: col=colors[5]
for s in range(0, sets, 2):
middleBar.append(0.5*(2*theta0+rad))# get the middle of each angular interval
# defining bar base
rank_Halep+=[jdata[where][0]['rank']]
Halep_set_sc.append(set_sc[s])
half_h.append(0.5*score[set_sc[s]])# middle of bar height
bar_colors.append(colors[0])
layout['shapes'].append(make_bar(score[set_sc[s]], theta0, colors[0], rad=rad, a=2))
rad=wb/(a*theta0)
theta0=theta0+rad
middleBar.append(0.5*(2*theta0+rad))
Opponent_set_sc.append(set_sc[s+1])
half_h.append(0.5*score[set_sc[s+1]])
Opponent.append(jdata[where][3+match].keys()[0])
bar_colors.append(col)
layout['shapes'].append(make_bar(score[set_sc[s+1]], theta0, col , rad=rad, a=2))
rad=wb/(a*theta0)
theta0=theta0+rad
gapM=interM/(a*theta0)
theta0=theta0-rad+gapM
gapT=interT/(a*theta0)
Theta.append(theta0)
theta0=theta0-gapM+gapT
print len(bar_colors), len(middleBar), len(Opponent), len(half_h)
nrB=nrB=len(bar_colors)
playersRank=['n']*nrB
for k in range(0,nrB, 2):
playersRank[k]=u'Halep'+' ('+'{:d}'.format(rank_Halep[k/2])+')'+'<br>'+\
'set score: '+str(Halep_set_sc[k/2])
for k in range(1, nrB, 2):
playersRank[k]=Opponent[(k-1)/2]+'<br>'+'set score: '+str(Opponent_set_sc[(k-1)/2])
players=[]# Plotly traces that define position of text on bars
for k in range(nrB):
z=(a*middleBar[k]+half_h[k])*np.exp(-1j*middleBar[k])
players.append(Scatter(x=[z.real],
y=[z.imag],
mode='markers',
marker=Marker(size=0.25, color=bar_colors[k]),
name='',
text=playersRank[k],
hoverinfo='text'
)
)
LT=len(Theta)
aa=[a-0.11]*2+[a-0.1]*3+[a-0.085]*5+[a-0.075]*5+[a-0.065]*4# here is a trick to get spiral arcs
#looking at the same distance from the bar charts
spiral=[] #Plotly traces of spiral arcs
for k in range(0, LT, 2):
X=[]
Y=[]
theta=np.linspace(Theta[k], Theta[k+1], 40)
Z=aa[k/2]*theta*np.exp(-1j*theta)
X+=Z.real.tolist()
Y+=Z.imag.tolist()
X.append(None)
Y.append(None)
spiral.append(Scatter(x=X,
y=Y,
mode='lines',
line=Line(color='#23238E', width=4),
name='',
text=played_at[k/2],
hoverinfo='text'))
data=Data(spiral+players)
fig=Figure(data=data,layout=layout)
py.sign_in('empet', 'my_api_key')
py.iplot(fig, filename='spiral-plot')
from IPython.core.display import HTML
def css_styling():
styles = open("./custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Many different types of samples were obtained from the TCGA participants, and details about these samples are available in the Biospecimen data table. This next query shows how many samples exist of each type, as well as the full names and abbreviations of each type
Step2: Note that there are many types of tumor samples
Step3: In this example, let's assume that we would like to do a study that requires a primary tumor sample and a matched-normal (adjacent) tissue sample. In order to find out which patients provided which types of samples, we need to query the Biospecimen data table. This next query module uses two sub-queries, one to get all patients with TP samples and another to get all patients with NT samples. The final query joins these two and returns a single list of patients.
Step4: It might be interesting to find out what the distribution of tumor types is for this list of patients with matched tumor-normal sample pairs. We can define a new SQL module that refers to the results of a previously defined query as long as we pass that reference in when we call bq.Query()
|
<ASSISTANT_TASK:>
Python Code:
import gcp.bigquery as bq
%%sql
SELECT
SampleType,
SampleTypeLetterCode,
COUNT(*) AS n
FROM
[isb-cgc:tcga_201607_beta.Biospecimen_data]
GROUP BY
SampleType,
SampleTypeLetterCode,
ORDER BY
n DESC
%%sql
SELECT
SampleTypeLetterCode,
COUNT(*) AS n
FROM (
SELECT
SampleBarcode,
SampleTypeLetterCode
FROM
[isb-cgc:tcga_201607_beta.mRNA_UNC_HiSeq_RSEM]
GROUP BY
SampleBarcode,
SampleTypeLetterCode )
GROUP BY
SampleTypeLetterCode
ORDER BY
n DESC
%%sql --module patients_with_matched_samples
DEFINE QUERY patients_tp
SELECT
ParticipantBarcode
FROM
[isb-cgc:tcga_201607_beta.Biospecimen_data]
WHERE
( SampleTypeLetterCode="TP" )
GROUP BY
ParticipantBarcode
ORDER BY
ParticipantBarcode
DEFINE QUERY patients_nt
SELECT
ParticipantBarcode
FROM
[isb-cgc:tcga_201607_beta.Biospecimen_data]
WHERE
( SampleTypeLetterCode="NT" )
GROUP BY
ParticipantBarcode
ORDER BY
ParticipantBarcode
DEFINE QUERY patients_both
SELECT
nt.ParticipantBarcode AS ParticipantBarcode
FROM ( $patients_nt ) AS nt
JOIN
( $patients_tp ) AS tp
ON
nt.ParticipantBarcode = tp.ParticipantBarcode
ORDER BY
ParticipantBarcode
bq.Query(patients_with_matched_samples).results().to_dataframe()
%%sql --module tumor_type_counts
# now we'll use this list to find what types of tumors these patients
# belong to:
SELECT
Study,
COUNT(*) AS n
FROM
[isb-cgc:tcga_201607_beta.Clinical_data]
WHERE
ParticipantBarcode IN ($patients_both)
GROUP BY
Study
ORDER BY
n DESC
bq.Query(tumor_type_counts,
patients_nt=patients_with_matched_samples.patients_nt,
patients_tp=patients_with_matched_samples.patients_tp,
patients_both=patients_with_matched_samples.patients_both).results().to_dataframe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download and install WeChat API-2
Step2: Housekeeping after installation
 Step3: If the above import runs without errors, then the installation was successful.
|
<ASSISTANT_TASK:>
Python Code:
!python --version
!pip install -U html
!pip install -U pyqrcode
!pip install -U config
!pip install -U backports.tempfile
!mv docs org_docs
!yes | pip uninstall itchat
!rm -rf ItChat
!git clone https://github.com/telescopeuser/ItChat.git
!cp -r ItChat/* .
!python setup.py install
!rm -rf itchat
!rm -rf ItChat
!rm -rf wxpy
!rm -rf README*
!rm -rf LICENSE
!rm -rf MANIFEST*
!rm -rf mkdocs*
!rm -rf build
!rm -rf dist
!rm -rf docs*
!rm -rf requirements.txt
!rm -rf setup.py
!rm -rf *.egg-info
!mv org_docs docs
!pip install -U google-api-python-client
!pip install -U gTTS
!apt-get update -y
!apt-get install libav-tools -y --allow-unauthenticated
!avconv -version
print('')
print('+-------------------------------------------------------------------------------------------------+')
print('| www.KudosData.com: Google Cloud Datalab Python 2 setup successful! |')
print('| You are now ready to rock! Go to folder: workshop_blog/wechat_tool, open Notebook and follow... |')
print('+-------------------------------------------------------------------------------------------------+')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Captioning with RNNs
Step2: Install h5py
Step3: Microsoft COCO
Step4: Look at the data
Step5: Recurrent Neural Networks
Step6: Vanilla RNN
Step7: Vanilla RNN
Step8: Vanilla RNN
Step9: Word embedding
Step10: Word embedding
Step11: Temporal Affine layer
Step12: Temporal Softmax loss
Step13: RNN for image captioning
 Step14: Run the following cell to perform numeric gradient checking on the CaptioningRNN class; you should see errors around 5e-6 or less.
Step15: Overfit small data
Step16: Test-time sampling
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
from __future__ import print_function
import time, os, json
import numpy as np
import matplotlib.pyplot as plt
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.rnn_layers import *
from cs231n.captioning_solver import CaptioningSolver
from cs231n.classifiers.rnn import CaptioningRNN
from cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions
from cs231n.image_utils import image_from_url
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
returns relative error
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
!pip install h5py
# Load COCO data from disk; this returns a dictionary
# We'll work with dimensionality-reduced features for this notebook, but feel
# free to experiment with the original features by changing the flag below.
data = load_coco_data(pca_features=True)
# Print out all the keys and values from the data dictionary
for k, v in data.items():
if type(v) == np.ndarray:
print(k, type(v), v.shape, v.dtype)
else:
print(k, type(v), len(v))
# Sample a minibatch and show the images and captions
batch_size = 3
captions, features, urls = sample_coco_minibatch(data, batch_size=batch_size)
for i, (caption, url) in enumerate(zip(captions, urls)):
plt.imshow(image_from_url(url))
plt.axis('off')
caption_str = decode_captions(caption, data['idx_to_word'])
plt.title(caption_str)
plt.show()
N, D, H = 3, 10, 4
x = np.linspace(-0.4, 0.7, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.2, 0.5, num=N*H).reshape(N, H)
Wx = np.linspace(-0.1, 0.9, num=D*H).reshape(D, H)
Wh = np.linspace(-0.3, 0.7, num=H*H).reshape(H, H)
b = np.linspace(-0.2, 0.4, num=H)
next_h, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
expected_next_h = np.asarray([
[-0.58172089, -0.50182032, -0.41232771, -0.31410098],
[ 0.66854692, 0.79562378, 0.87755553, 0.92795967],
[ 0.97934501, 0.99144213, 0.99646691, 0.99854353]])
print('next_h error: ', rel_error(expected_next_h, next_h))
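# Editor's note: rnn_step_forward is expected to compute
# next_h = np.tanh(x.dot(Wx) + prev_h.dot(Wh) + b); the hard-coded expected_next_h
# values above come from exactly that formula.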
from cs231n.rnn_layers import rnn_step_forward, rnn_step_backward
np.random.seed(231)
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
fx = lambda x: rnn_step_forward(x, h, Wx, Wh, b)[0]
fh = lambda prev_h: rnn_step_forward(x, h, Wx, Wh, b)[0]
fWx = lambda Wx: rnn_step_forward(x, h, Wx, Wh, b)[0]
fWh = lambda Wh: rnn_step_forward(x, h, Wx, Wh, b)[0]
fb = lambda b: rnn_step_forward(x, h, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dnext_h)
dprev_h_num = eval_numerical_gradient_array(fh, h, dnext_h)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dnext_h)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dnext_h)
db_num = eval_numerical_gradient_array(fb, b, dnext_h)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dprev_h error: ', rel_error(dprev_h_num, dprev_h))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
N, T, D, H = 2, 3, 4, 5
x = np.linspace(-0.1, 0.3, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.3, 0.1, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.4, num=D*H).reshape(D, H)
Wh = np.linspace(-0.4, 0.1, num=H*H).reshape(H, H)
b = np.linspace(-0.7, 0.1, num=H)
h, _ = rnn_forward(x, h0, Wx, Wh, b)
expected_h = np.asarray([
[
[-0.42070749, -0.27279261, -0.11074945, 0.05740409, 0.22236251],
[-0.39525808, -0.22554661, -0.0409454, 0.14649412, 0.32397316],
[-0.42305111, -0.24223728, -0.04287027, 0.15997045, 0.35014525],
],
[
[-0.55857474, -0.39065825, -0.19198182, 0.02378408, 0.23735671],
[-0.27150199, -0.07088804, 0.13562939, 0.33099728, 0.50158768],
[-0.51014825, -0.30524429, -0.06755202, 0.17806392, 0.40333043]]])
print('h error: ', rel_error(expected_h, h))
np.random.seed(231)
N, D, T, H = 2, 3, 10, 5
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_forward(x, h0, Wx, Wh, b)
dout = np.random.randn(*out.shape)
dx, dh0, dWx, dWh, db = rnn_backward(dout, cache)
fx = lambda x: rnn_forward(x, h0, Wx, Wh, b)[0]
fh0 = lambda h0: rnn_forward(x, h0, Wx, Wh, b)[0]
fWx = lambda Wx: rnn_forward(x, h0, Wx, Wh, b)[0]
fWh = lambda Wh: rnn_forward(x, h0, Wx, Wh, b)[0]
fb = lambda b: rnn_forward(x, h0, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dh0_num = eval_numerical_gradient_array(fh0, h0, dout)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dout)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
N, T, V, D = 2, 4, 5, 3
x = np.asarray([[0, 3, 1, 2], [2, 1, 0, 3]])
W = np.linspace(0, 1, num=V*D).reshape(V, D)
out, _ = word_embedding_forward(x, W)
expected_out = np.asarray([
[[ 0., 0.07142857, 0.14285714],
[ 0.64285714, 0.71428571, 0.78571429],
[ 0.21428571, 0.28571429, 0.35714286],
[ 0.42857143, 0.5, 0.57142857]],
[[ 0.42857143, 0.5, 0.57142857],
[ 0.21428571, 0.28571429, 0.35714286],
[ 0., 0.07142857, 0.14285714],
[ 0.64285714, 0.71428571, 0.78571429]]])
print('out error: ', rel_error(expected_out, out))
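# Editor's note: word_embedding_forward reduces to integer-array indexing, out = W[x],
# turning the (N, T) array of word indices into an (N, T, D) array of word vectors.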
np.random.seed(231)
N, T, V, D = 50, 3, 5, 6
x = np.random.randint(V, size=(N, T))
W = np.random.randn(V, D)
out, cache = word_embedding_forward(x, W)
dout = np.random.randn(*out.shape)
dW = word_embedding_backward(dout, cache)
f = lambda W: word_embedding_forward(x, W)[0]
dW_num = eval_numerical_gradient_array(f, W, dout)
print('dW error: ', rel_error(dW, dW_num))
np.random.seed(231)
# Gradient check for temporal affine layer
N, T, D, M = 2, 3, 4, 5
x = np.random.randn(N, T, D)
w = np.random.randn(D, M)
b = np.random.randn(M)
out, cache = temporal_affine_forward(x, w, b)
dout = np.random.randn(*out.shape)
fx = lambda x: temporal_affine_forward(x, w, b)[0]
fw = lambda w: temporal_affine_forward(x, w, b)[0]
fb = lambda b: temporal_affine_forward(x, w, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dw_num = eval_numerical_gradient_array(fw, w, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
dx, dw, db = temporal_affine_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
# Sanity check for temporal softmax loss
from cs231n.rnn_layers import temporal_softmax_loss
N, T, V = 100, 1, 10
def check_loss(N, T, V, p):
x = 0.001 * np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = np.random.rand(N, T) <= p
print(temporal_softmax_loss(x, y, mask)[0])
check_loss(100, 1, 10, 1.0) # Should be about 2.3
check_loss(100, 10, 10, 1.0) # Should be about 23
check_loss(5000, 10, 10, 0.1) # Should be about 2.3
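# Editor's note on the expected values: with near-zero scores each timestep contributes
# about -log(1/10) ~= 2.3, so T=1 gives ~2.3, T=10 gives ~23, and masking ~90% of the
# (N=5000, T=10) entries brings the per-example average back to ~2.3.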
# Gradient check for temporal softmax loss
N, T, V = 7, 8, 9
x = np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = (np.random.rand(N, T) > 0.5)
loss, dx = temporal_softmax_loss(x, y, mask, verbose=False)
dx_num = eval_numerical_gradient(lambda x: temporal_softmax_loss(x, y, mask)[0], x, verbose=False)
print('dx error: ', rel_error(dx, dx_num))
N, D, W, H = 10, 20, 30, 40
word_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}
V = len(word_to_idx)
T = 13
model = CaptioningRNN(word_to_idx,
input_dim=D,
wordvec_dim=W,
hidden_dim=H,
cell_type='rnn',
dtype=np.float64)
# Set all model parameters to fixed values
for k, v in model.params.items():
model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
features = np.linspace(-1.5, 0.3, num=(N * D)).reshape(N, D)
captions = (np.arange(N * T) % V).reshape(N, T)
loss, grads = model.loss(features, captions)
expected_loss = 9.83235591003
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
np.random.seed(231)
batch_size = 2
timesteps = 3
input_dim = 4
wordvec_dim = 5
hidden_dim = 6
word_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}
vocab_size = len(word_to_idx)
captions = np.random.randint(vocab_size, size=(batch_size, timesteps))
features = np.random.randn(batch_size, input_dim)
model = CaptioningRNN(word_to_idx,
input_dim=input_dim,
wordvec_dim=wordvec_dim,
hidden_dim=hidden_dim,
cell_type='rnn',
dtype=np.float64,
)
loss, grads = model.loss(features, captions)
for param_name in sorted(grads):
f = lambda _: model.loss(features, captions)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print('%s relative error: %e' % (param_name, e))
np.random.seed(231)
small_data = load_coco_data(max_train=50)
small_rnn_model = CaptioningRNN(
cell_type='rnn',
word_to_idx=data['word_to_idx'],
input_dim=data['train_features'].shape[1],
hidden_dim=512,
wordvec_dim=256,
)
small_rnn_solver = CaptioningSolver(small_rnn_model, small_data,
update_rule='adam',
num_epochs=50,
batch_size=25,
optim_config={
'learning_rate': 5e-3,
},
lr_decay=0.95,
verbose=True, print_every=10,
)
small_rnn_solver.train()
# Plot the training losses
plt.plot(small_rnn_solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.show()
for split in ['train', 'val']:
minibatch = sample_coco_minibatch(small_data, split=split, batch_size=2)
gt_captions, features, urls = minibatch
gt_captions = decode_captions(gt_captions, data['idx_to_word'])
sample_captions = small_rnn_model.sample(features)
sample_captions = decode_captions(sample_captions, data['idx_to_word'])
for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
plt.imshow(image_from_url(url))
plt.title('%s\n%s\nGT:%s' % (split, sample_caption, gt_caption))
plt.axis('off')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
 Step1: Please change pkg_path and model_file to the correct paths
Step2: Plot the Feature Importance
Step3: Plot the First Tree
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
%matplotlib inline
pkg_path = '../../python-package/'
model_file = 's3://my-bucket/xgb-demo/model/0002.model'
sys.path.insert(0, pkg_path)
import xgboost as xgb
# plot the first two trees.
bst = xgb.Booster(model_file=model_file)
xgb.plot_importance(bst)
tree_id = 0
xgb.to_graphviz(bst, tree_id)
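# Added note: xgb.plot_tree(bst, num_trees=tree_id) would render the same tree with
# matplotlib instead of returning a graphviz object (mentioned only as an aside).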
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here I've defined an empty list, called x. Like our previous variables, this has both a name (x) and a type (list). However, it doesn't have any actual value beyond that; it's just an empty list. Imagine a filing cabinet with nothing in it.
Step2: The append() method takes whatever argument I supply to the function, and inserts it into the next available position in the list. Which, in this case, is the very first position (since the list was previously empty).
Step3: It's tough to tell that there's really anything going on, but those square brackets [ and ] are the key
Step4: In this example, I've created a new list y, initially empty, and added three integer values. Notice the ordering of the elements when I print the list at the end
Step5: and floats
Step6: and even other lists!
Step7: Indexing
Step8: In this code example, I've used the number 1 as an index to y. In doing so, I took out the value at index 1 and put it into a variable named first_element. I then printed it, as well as the list y, and voi--
Step9: Much better.
Step10: Yep, there's our inception-list, the last element of y.
Step11: Using more indexing voodoo, you can also index slices of lists. Let's say we want to create a new list that consists of the integer elements of y, which are the first three. We could pull them out one by one, or use slicing
Step12: That y[0
Step13: Python, why do you torment me so?!
Step14: Part 2
Step15: we use regular parentheses to create a tuple!
Step16: With lists, if you wanted to change the item at index 2, you could go right ahead
Step17: Can't do that with tuples, sorry.
Step18: Like list, there is a method for building an empty tuple. Any guesses?
Step19: And like lists, you have (almost) all of the other methods at your disposal, such as slicing and len
Step20: Sets
Step21: There are certain situations where this can be very useful. It should be noted that sets can actually be built from lists, so you can build a list and then turn it into a set
Step22: Sets also don't index the same way lists and tuples do
Step23: If you want to add elements to a set, you can use the add method.
Step24: New elements can be added to the dictionary in much the same way as lists
Step25: Yes, you can use strings as keys! In fact, string is probably the most common data type to use as a key in dictionaries. That way, you can treat dictionaries as "look up" tables--maybe you're storing information on people in a beta testing program. You can store their information by name
Step26: Since dictionaries do not maintain any kind of ordering of elements, using integers as indices won't give us anything useful. However, dictionaries do have a keys() method that gives us a list of all the keys in the dictionary
Step27: and a values() method for (you guessed it) the values in the dictionary
Step28: To further induce Inception-style headaches, dictionaries also have a items() method that returns a list of tuples where each tuple is a key-value pair in the dictionary!
Step29: (it's basically the entire dictionary, but this method is useful for looping)
Step30: This is a list containing the ages of some group of students. Any group. Any group of students. And we want to compute the average. How do we compute averages?
Step31: The total quantity is a bit trickier. You could certainly sum them all manually--
Step32: ...but that seems really, really tedious. Plus, how do you even know how many elements your list has?
Step33: 1
Step34: IMPORTANT
Step35: With loops, whitespace in Python really starts to matter. If you want many things to happen inside of a loop, you'll need to indent every line!
Step36: ...but that's awfully boring. Plus, it's of no help if I come back with a "Part 2" that asks for the squares for numbers 11-20. And a "Part C" for 21-30. Can we make this loop work for any numbers?
Step37: You can think of this as a kinda-sorta list, or even convert it to a list if you want
Step38: range(end)
Step39: Looping through dictionaries
Step40: Remember the super-useful methods for iterating through dictionaries? keys gives you a list of all the keys, values a list of all the values, and items a list of tuples of the key-value pairs. Here's the loop
Step41: 1
Step42: instead of this
Step43: In the same vein, I could have just as easily written the loop like this
|
<ASSISTANT_TASK:>
Python Code:
x = list()
x.append(1)
print(x)
y = list()
y.append(1)
y.append(2)
y.append(3)
print(y)
y.append("this is perfectly legal")
y.append(4.2)
y.append(list()) # Inception BWAAAAAAAAAA
print(y)
first_element = y[1]
print(first_element)
print(y)
print(y[0])
print(y)
print(y[-1])
print(y)
print(y[-2])
print(y[-3])
print(y)
int_elements = y[0:3] # Slicing!
print(y)
print(int_elements)
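# A couple of extra slicing examples (added for illustration):
print(y[::2])   # every other element of y
print(y[::-1])  # a reversed copy of y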
print(y[3])
z = [42, 502.4, "some string", 0]
x = [3, 64.2, "some list"]
print(type(x))
y = (3, 64.2, "some tuple")
print(type(y))
x[2] = "a different string"
print(x)
y[2] = "does this work?"
z = tuple()
print(y[0:2])
print(len(y))
x = list()
x.append(1)
x.append(2)
x.append(2) # Add the same thing twice.
s = set()
s.add(1)
s.add(2)
s.add(2) # Add the same thing twice...again.
print(x)
print(s)
x = [1, 2, 3, 3]
s = set(x) # Take the list x as the starting point.
print(s)
print(s[0])
d = dict()
# Or...
d = {}
d["some_key"] = 14.3
d["shannon_quinn"] = ["some", "personal", "information"]
print(d)
print(d.keys())
print(d.values())
print(d.items())
ages = [21, 22, 19, 19, 22, 21, 22, 31]
number_of_elements = len(ages)
print(number_of_elements)
age_sum = ages[0] + ages[1] + ages[2] # + ... and so on
age_sum = 0.0 # Set age_sum to be a float. Why a float? This becomes important at the end.
for age in ages: # 1
age_sum += age # 2
avg = age_sum / number_of_elements # Compute the average using the formula we know and love!
print("Average age: {:.2f}".format(avg))
s = set([1, 1, 2, 3, 5])
for item in s:
print(item)
t = tuple([1, 1, 2, 3, 5])
for item in t:
print(item)
some_list = [3.14159, "random stuff", 4200]
for item in some_list:
print(item)
squares = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
numbers = range(10)
print(numbers)
numbers = list(numbers)
print(numbers)
squares = [] # Empty list for all our squares
for num in numbers:
squared_number = num ** 2 # Exponent operation!
squares.append(squared_number) # Add to our list.
print(squares)
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'shannon': 'python'
}
# Notice the indentation, if you decide to define a dictionary this way!
for key, value in favorite_languages.items(): # 1
print("{} prefers {}.".format(key, value)) # 2
some_list = ['a', 'b']
a, b = some_list
some_list = ['a', 'b']
a = some_list[0]
b = some_list[1]
for keyvalue in favorite_languages.items(): # 1
key = keyvalue[0]
value = keyvalue[1]
print("{} prefers {}.".format(key, value)) # 2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Computing the eigenvalues and the eigenvectors
Step2: The @ operator stands, in this context, for matrix multiplication.
Step3: Modal Response
 Step4: The definition of the time vector is a bit complicated...
|
<ASSISTANT_TASK:>
Python Code:
M = np.array(((2.0, 0.0), ( 0.0, 1.0)))
K = np.array(((3.0,-2.0), (-2.0, 2.0)))
p = np.array(( 0.0, 1.0))
w = 2.0
evals, Psi = eigh(K, M)
Mstar = Psi.T@M@Psi
Kstar = Psi.T@K@Psi
pstar = Psi.T@p
print(evals,end='\n\n')
print(Psi,end='\n\n')
print(Mstar,end='\n\n')
print(Kstar,end='\n\n')
print(pstar,end='\n\n')
print(Mstar[0,1]*150*10E6*1000)
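# Added check (illustration): scipy's generalized eigh returns mass-normalised modes,
# so Mstar should be numerically the identity and Kstar a diagonal matrix of the eigenvalues.
print(np.allclose(Mstar, np.eye(2)), np.allclose(Kstar, np.diag(evals)))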
L = np.sqrt(evals)
DAF = 1.0/(L**2-w**2)
beta = w/L
t = np.linspace(0,60,601)[:,None]
q = pstar*DAF*(np.sin(w*t)-beta*np.sin(L*t))
curves = plt.plot(t,q)
plt.legend(curves,['q1', 'q2'])
plt.title('Modal Response')
plt.xlabel('$\omega_0t$')
plt.ylabel('$q_i/\Delta_{st}$');
x = (Psi@q.T).T
curves = plt.plot(t, x)
plt.legend(curves,['x1', 'x2'])
plt.title('Structural Response')
plt.xlabel('$\omega_0t$')
plt.ylabel('$X_i/\Delta_{st}$');
%matplotlib inline
import matplotlib.pyplot as plt ; plt.style.use(['fivethirtyeight', '00_mplrc'])
import numpy as np
from scipy.linalg import eigh
np.set_printoptions(suppress=False, linewidth=120)
from IPython.display import HTML
HTML(open('00_custom.css').read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating a TensorFlow Dataset
Step2: MIRNet Model
Step3: Dual Attention Unit
Step4: Multi-Scale Residual Block
Step5: MIRNet Model
Step6: Training
Step7: Inference
Step8: Inference on Test Images
|
<ASSISTANT_TASK:>
Python Code:
import os
import cv2
import random
import numpy as np
from glob import glob
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
!gdown https://drive.google.com/uc?id=1DdGIJ4PZPlF2ikl8mNM9V-PdVxVLbQi6
!unzip -q lol_dataset.zip
random.seed(10)
IMAGE_SIZE = 128
BATCH_SIZE = 4
MAX_TRAIN_IMAGES = 300
def read_image(image_path):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf.cast(image, dtype=tf.float32) / 255.0
return image
def random_crop(low_image, enhanced_image):
low_image_shape = tf.shape(low_image)[:2]
low_w = tf.random.uniform(
shape=(), maxval=low_image_shape[1] - IMAGE_SIZE + 1, dtype=tf.int32
)
low_h = tf.random.uniform(
shape=(), maxval=low_image_shape[0] - IMAGE_SIZE + 1, dtype=tf.int32
)
enhanced_w = low_w
enhanced_h = low_h
low_image_cropped = low_image[
low_h : low_h + IMAGE_SIZE, low_w : low_w + IMAGE_SIZE
]
enhanced_image_cropped = enhanced_image[
enhanced_h : enhanced_h + IMAGE_SIZE, enhanced_w : enhanced_w + IMAGE_SIZE
]
return low_image_cropped, enhanced_image_cropped
def load_data(low_light_image_path, enhanced_image_path):
low_light_image = read_image(low_light_image_path)
enhanced_image = read_image(enhanced_image_path)
low_light_image, enhanced_image = random_crop(low_light_image, enhanced_image)
return low_light_image, enhanced_image
def get_dataset(low_light_images, enhanced_images):
dataset = tf.data.Dataset.from_tensor_slices((low_light_images, enhanced_images))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[:MAX_TRAIN_IMAGES]
train_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[:MAX_TRAIN_IMAGES]
val_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[MAX_TRAIN_IMAGES:]
val_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[MAX_TRAIN_IMAGES:]
test_low_light_images = sorted(glob("./lol_dataset/eval15/low/*"))
test_enhanced_images = sorted(glob("./lol_dataset/eval15/high/*"))
train_dataset = get_dataset(train_low_light_images, train_enhanced_images)
val_dataset = get_dataset(val_low_light_images, val_enhanced_images)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
def selective_kernel_feature_fusion(
multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3
):
channels = list(multi_scale_feature_1.shape)[-1]
combined_feature = layers.Add()(
[multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3]
)
gap = layers.GlobalAveragePooling2D()(combined_feature)
channel_wise_statistics = tf.reshape(gap, shape=(-1, 1, 1, channels))
compact_feature_representation = layers.Conv2D(
filters=channels // 8, kernel_size=(1, 1), activation="relu"
)(channel_wise_statistics)
feature_descriptor_1 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_descriptor_2 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_descriptor_3 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_1 = multi_scale_feature_1 * feature_descriptor_1
feature_2 = multi_scale_feature_2 * feature_descriptor_2
feature_3 = multi_scale_feature_3 * feature_descriptor_3
aggregated_feature = layers.Add()([feature_1, feature_2, feature_3])
return aggregated_feature
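# Illustrative shape check for the SKFF block (assumes TF 2.x eager execution):
# three same-resolution feature maps with 64 channels fuse into one map of the same shape.
_skff_demo = tf.random.normal((1, 32, 32, 64))
print(selective_kernel_feature_fusion(_skff_demo, _skff_demo, _skff_demo).shape)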
def spatial_attention_block(input_tensor):
    max_pooling = tf.reduce_max(input_tensor, axis=-1)
    max_pooling = tf.expand_dims(max_pooling, axis=-1)
    average_pooling = tf.reduce_mean(input_tensor, axis=-1)
    average_pooling = tf.expand_dims(average_pooling, axis=-1)
    concatenated = layers.Concatenate(axis=-1)([max_pooling, average_pooling])
feature_map = layers.Conv2D(1, kernel_size=(1, 1))(concatenated)
feature_map = tf.nn.sigmoid(feature_map)
return input_tensor * feature_map
def channel_attention_block(input_tensor):
channels = list(input_tensor.shape)[-1]
average_pooling = layers.GlobalAveragePooling2D()(input_tensor)
feature_descriptor = tf.reshape(average_pooling, shape=(-1, 1, 1, channels))
feature_activations = layers.Conv2D(
filters=channels // 8, kernel_size=(1, 1), activation="relu"
)(feature_descriptor)
feature_activations = layers.Conv2D(
filters=channels, kernel_size=(1, 1), activation="sigmoid"
)(feature_activations)
return input_tensor * feature_activations
def dual_attention_unit_block(input_tensor):
channels = list(input_tensor.shape)[-1]
feature_map = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(input_tensor)
feature_map = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(
feature_map
)
channel_attention = channel_attention_block(feature_map)
spatial_attention = spatial_attention_block(feature_map)
concatenation = layers.Concatenate(axis=-1)([channel_attention, spatial_attention])
concatenation = layers.Conv2D(channels, kernel_size=(1, 1))(concatenation)
return layers.Add()([input_tensor, concatenation])
# Recursive Residual Modules
def down_sampling_module(input_tensor):
channels = list(input_tensor.shape)[-1]
main_branch = layers.Conv2D(channels, kernel_size=(1, 1), activation="relu")(
input_tensor
)
main_branch = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(main_branch)
main_branch = layers.MaxPooling2D()(main_branch)
main_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(main_branch)
skip_branch = layers.MaxPooling2D()(input_tensor)
skip_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(skip_branch)
return layers.Add()([skip_branch, main_branch])
def up_sampling_module(input_tensor):
channels = list(input_tensor.shape)[-1]
main_branch = layers.Conv2D(channels, kernel_size=(1, 1), activation="relu")(
input_tensor
)
main_branch = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(main_branch)
main_branch = layers.UpSampling2D()(main_branch)
main_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(main_branch)
skip_branch = layers.UpSampling2D()(input_tensor)
skip_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(skip_branch)
return layers.Add()([skip_branch, main_branch])
# MRB Block
def multi_scale_residual_block(input_tensor, channels):
# features
level1 = input_tensor
level2 = down_sampling_module(input_tensor)
level3 = down_sampling_module(level2)
# DAU
level1_dau = dual_attention_unit_block(level1)
level2_dau = dual_attention_unit_block(level2)
level3_dau = dual_attention_unit_block(level3)
# SKFF
level1_skff = selective_kernel_feature_fusion(
level1_dau,
up_sampling_module(level2_dau),
up_sampling_module(up_sampling_module(level3_dau)),
)
level2_skff = selective_kernel_feature_fusion(
down_sampling_module(level1_dau), level2_dau, up_sampling_module(level3_dau)
)
level3_skff = selective_kernel_feature_fusion(
down_sampling_module(down_sampling_module(level1_dau)),
down_sampling_module(level2_dau),
level3_dau,
)
# DAU 2
level1_dau_2 = dual_attention_unit_block(level1_skff)
level2_dau_2 = up_sampling_module((dual_attention_unit_block(level2_skff)))
level3_dau_2 = up_sampling_module(
up_sampling_module(dual_attention_unit_block(level3_skff))
)
# SKFF 2
    skff_ = selective_kernel_feature_fusion(level1_dau_2, level2_dau_2, level3_dau_2)
conv = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(skff_)
return layers.Add()([input_tensor, conv])
def recursive_residual_group(input_tensor, num_mrb, channels):
conv1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(input_tensor)
for _ in range(num_mrb):
conv1 = multi_scale_residual_block(conv1, channels)
conv2 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(conv1)
return layers.Add()([conv2, input_tensor])
def mirnet_model(num_rrg, num_mrb, channels):
input_tensor = keras.Input(shape=[None, None, 3])
x1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(input_tensor)
for _ in range(num_rrg):
x1 = recursive_residual_group(x1, num_mrb, channels)
conv = layers.Conv2D(3, kernel_size=(3, 3), padding="same")(x1)
output_tensor = layers.Add()([input_tensor, conv])
return keras.Model(input_tensor, output_tensor)
model = mirnet_model(num_rrg=3, num_mrb=2, channels=64)
def charbonnier_loss(y_true, y_pred):
return tf.reduce_mean(tf.sqrt(tf.square(y_true - y_pred) + tf.square(1e-3)))
def peak_signal_noise_ratio(y_true, y_pred):
return tf.image.psnr(y_pred, y_true, max_val=255.0)
optimizer = keras.optimizers.Adam(learning_rate=1e-4)
model.compile(
optimizer=optimizer, loss=charbonnier_loss, metrics=[peak_signal_noise_ratio]
)
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=50,
callbacks=[
keras.callbacks.ReduceLROnPlateau(
monitor="val_peak_signal_noise_ratio",
factor=0.5,
patience=5,
verbose=1,
min_delta=1e-7,
mode="max",
)
],
)
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
plt.plot(history.history["peak_signal_noise_ratio"], label="train_psnr")
plt.plot(history.history["val_peak_signal_noise_ratio"], label="val_psnr")
plt.xlabel("Epochs")
plt.ylabel("PSNR")
plt.title("Train and Validation PSNR Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
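# Optionally persist the trained weights so inference can be reproduced later
# (illustrative; the file name is arbitrary):
model.save_weights("mirnet_weights.h5")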
def plot_results(images, titles, figure_size=(12, 12)):
fig = plt.figure(figsize=figure_size)
for i in range(len(images)):
fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
_ = plt.imshow(images[i])
plt.axis("off")
plt.show()
def infer(original_image):
image = keras.preprocessing.image.img_to_array(original_image)
image = image.astype("float32") / 255.0
image = np.expand_dims(image, axis=0)
output = model.predict(image)
output_image = output[0] * 255.0
output_image = output_image.clip(0, 255)
output_image = output_image.reshape(
(np.shape(output_image)[0], np.shape(output_image)[1], 3)
)
output_image = Image.fromarray(np.uint8(output_image))
original_image = Image.fromarray(np.uint8(original_image))
return output_image
for low_light_image in random.sample(test_low_light_images, 6):
original_image = Image.open(low_light_image)
enhanced_image = infer(original_image)
plot_results(
[original_image, ImageOps.autocontrast(original_image), enhanced_image],
["Original", "PIL Autocontrast", "MIRNet Enhanced"],
(20, 12),
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Character counting and entropy
Step5: The entropy is a quantitative measure of the disorder of a probability distribution. It is used extensively in Physics, Statistics, Machine Learning, Computer Science and Information Science. Given a set of probabilities $P_i$, the entropy is defined as $H = -\sum_i P_i \log_2(P_i)$
Step6: Use IPython's interact function to create a user interface that allows you to type a string into a text box and see the entropy of the character probabilities of the string.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact
def char_probs(s):
    """Find the probabilities of the unique characters in the string s.
    Parameters
    ----------
    s : str
        A string of characters.
    Returns
    -------
    probs : dict
        A dictionary whose keys are the unique characters in s and whose values
        are the probabilities of those characters.
    """
dictionary = {}
for n in s:
dictionary[n]= (s.count(n))/len(s)
return dictionary
test1 = char_probs('aaaa')
assert np.allclose(test1['a'], 1.0)
test2 = char_probs('aabb')
assert np.allclose(test2['a'], 0.5)
assert np.allclose(test2['b'], 0.5)
test3 = char_probs('abcd')
assert np.allclose(test3['a'], 0.25)
assert np.allclose(test3['b'], 0.25)
assert np.allclose(test3['c'], 0.25)
assert np.allclose(test3['d'], 0.25)
def entropy(d):
    """Compute the entropy (in bits) of a dict d whose values are probabilities."""
#t = np.array(d)
#t = np.sort(t)
H = 0
l = [(i,d[i]) for i in d]
t = sorted(l, key = lambda x:x[1], reverse = True)
for n in t:
H = H + (n[1])*np.log2(n[1])
#t = char_probs(t)*np.log2(char_probs(t))
return -H
entropy({'a': 0.5, 'b': 0.5})
assert np.allclose(entropy({'a': 0.5, 'b': 0.5}), 1.0)
assert np.allclose(entropy({'a': 1.0}), 0.0)
def z(x):
print(entropy(char_probs(x)))
return entropy(char_probs(x))
interact(z, x='string');
assert True # leave this cell for grading
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication keys
Step2: MongoDB Collection
Step6: Starting a Stream
Step8: Connect to a streaming API
Step9: Data Access and Analysis
Step10: Load results to a DataFrame
Step11: Checking the highest used words
Step12: Visualization
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import tweepy
import matplotlib.pyplot as plt
import pymongo
import ipywidgets as wgt
from IPython.display import display
from sklearn.feature_extraction.text import CountVectorizer
import re
from datetime import datetime
%matplotlib inline
api_key = "yP0yoCitoUNgD63ebMerGyJaE" # <---- Add your API Key
api_secret = "kLO5YUtlth3cd4lOHLy8nlLHW5npVQgUfO4FhsyCn6wCMIz5E6" # <---- Add your API Secret
access_token = "259862037-iMXNjfL8JBApm4LVcdfwc3FcMm7Xta4TKg5cd44K" # <---- Add your access token
access_token_secret = "UIgh08dtmavzlvlWWukIXwN5HDIQD0wNwyn5sPzhrynBf" # <---- Add your access token secret
auth = tweepy.OAuthHandler(api_key, api_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
col = pymongo.MongoClient()["tweets"]["StreamingTutorial"]
col.count()
class MyStreamListener(tweepy.StreamListener):
counter = 0
def __init__(self, max_tweets=1000, *args, **kwargs):
self.max_tweets = max_tweets
self.counter = 0
super().__init__(*args, **kwargs)
def on_connect(self):
self.counter = 0
self.start_time = datetime.now()
def on_status(self, status):
# Increment counter
self.counter += 1
# Store tweet to MongoDB
col.insert_one(status._json)
if self.counter % 1 == 0:
value = int(100.00 * self.counter / self.max_tweets)
mining_time = datetime.now() - self.start_time
progress_bar.value = value
            html_value = '<span class="label label-primary">Tweets/Sec: %.1f</span>' % (self.counter / max([1,mining_time.seconds]))
            html_value += '<span class="label label-success">Progress: %.1f%%</span>' % (self.counter / self.max_tweets * 100.0)
            html_value += '<span class="label label-info">ETA: %.1f Sec</span>' % ((self.max_tweets - self.counter) / (self.counter / max([1,mining_time.seconds])))
wgt_status.value = html_value
#print("%s/%s" % (self.counter, self.max_tweets))
if self.counter >= self.max_tweets:
myStream.disconnect()
print("Finished")
print("Total Mining Time: %s" % (mining_time))
print("Tweets/Sec: %.1f" % (self.max_tweets / mining_time.seconds))
progress_bar.value = 0
myStreamListener = MyStreamListener(max_tweets=100)
myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
keywords = ["Jupyter",
"Python",
"Data Mining",
"Machine Learning",
"Data Science",
"Big Data",
"DataMining",
"MachineLearning",
"DataScience",
"BigData",
"IoT",
"#R",
]
# Visualize a progress bar to track progress
progress_bar = wgt.IntProgress(value=0)
display(progress_bar)
wgt_status = wgt.HTML(value='<span class="label label-primary">Tweets/Sec: 0.0</span>')
display(wgt_status)
# Start a filter with an error counter of 20
for error_counter in range(20):
try:
myStream.filter(track=keywords)
print("Tweets collected: %s" % myStream.listener.counter)
print("Total tweets in collection: %s" % col.count())
break
except:
print("ERROR# %s" % (error_counter + 1))
col.find_one()
dataset = [{"created_at": item["created_at"],
"text": item["text"],
"user": "@%s" % item["user"]["screen_name"],
"source": item["source"],
} for item in col.find()]
dataset = pd.DataFrame(dataset)
dataset
cv = CountVectorizer()
count_matrix = cv.fit_transform(dataset.text)
word_count = pd.DataFrame(cv.get_feature_names(), columns=["word"])
word_count["count"] = count_matrix.sum(axis=0).tolist()[0]
word_count = word_count.sort_values("count", ascending=False).reset_index(drop=True)
word_count[:50]
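# The raw top-word list is usually dominated by stop words and URL fragments.
# An illustrative variant that drops common English stop words:
cv_filtered = CountVectorizer(stop_words="english")
count_matrix_filtered = cv_filtered.fit_transform(dataset.text)
word_count_filtered = pd.DataFrame(cv_filtered.get_feature_names(), columns=["word"])
word_count_filtered["count"] = count_matrix_filtered.sum(axis=0).tolist()[0]
word_count_filtered.sort_values("count", ascending=False).head(20)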
def get_source_name(x):
value = re.findall(pattern="<[^>]+>([^<]+)</a>", string=x)
if len(value) > 0:
return value[0]
else:
return ""
dataset["source_name"] = dataset["source"].apply(get_source_name)
source_counts = dataset.source_name.value_counts().sort_values()[-10:]
bottom = [index for index, item in enumerate(source_counts.index)]
plt.barh(bottom, width=source_counts, color="orange", linewidth=0)
y_labels = ["%s %.1f%%" % (item, 100.0*source_counts[item]/len(dataset)) for index,item in enumerate(source_counts.index)]
plt.yticks(np.array(bottom)+0.4, y_labels)
source_counts
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Rotation about X-axis
Step3: Rotation about Y-axis
Step4: Rotation about Z-axis
Step5: CNOT
Step6: CSIGN
Step7: Berkeley
Step8: SWAPalpha
Step9: FREDKIN
Step10: TOFFOLI
Step11: SWAP
Step12: ISWAP
Step13: SQRTiSWAP
Step14: SQRTSWAP
Step15: SQRTNOT
Step16: HADAMARD
Step17: PHASEGATE
Step18: GLOBALPHASE
Step19: Mølmer–Sørensen gate
Step20: Qubit rotation gate
Step21: Expanding gates to larger qubit registers
Step22: Furthermore, the control and target qubits (when applicable) can also be similarly specified using keyword arguments control and target (or in some cases controls or targets)
Step23: Setup of a Qubit Circuit
Step24: In place of manually converting the SWAP gate to CNOTs, it can be automatically converted using an inbuilt function in QubitCircuit
Step25: From QuTiP 4.4, we can also add a gate at an arbitrary position in a circuit.
Step26: Example of basis transformation
Step27: The transformation can either be only in terms of 2-qubit gates
Step28: Or the transformation can be in terms of any 2 single qubit rotation gates along with the 2-qubit gate.
Step29: Resolving non-adjacent interactions
Step30: Adding gate in the middle of a circuit
Step31: User defined gates
Step32: To let the QubitCircuit process those gates, we need to modify its attribute QubitCircuit.user_gates, which is a python dictionary in the form {name
Step33: When calling the add_gate method, the target qubits and the argument need to be given.
Step34: Software versions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from IPython.display import Image
from numpy import pi
import numpy as np
from qutip import *
from qutip.qip.operations import *
from qutip.qip.circuit import QubitCircuit, Gate
cphase(pi/2)
q = QubitCircuit(2, reverse_states=False)
q.add_gate("CSIGN", controls=[0], targets=[1])
q.png
rx(pi/2)
q = QubitCircuit(1, reverse_states=False)
q.add_gate("RX", targets=[0], arg_value=pi/2, arg_label=r'\frac{\pi}{2}')
q.png
ry(pi/2)
q = QubitCircuit(1, reverse_states=False)
q.add_gate("RY", targets=[0], arg_value=pi/2, arg_label=r'\frac{\pi}{2}')
q.png
rz(pi/2)
q = QubitCircuit(1, reverse_states=False)
q.add_gate("RZ", targets=[0], arg_value=pi/2, arg_label=r'\frac{\pi}{2}')
q.png
cnot()
q = QubitCircuit(2, reverse_states=False)
q.add_gate("CNOT", controls=[0], targets=[1])
q.png
csign()
q = QubitCircuit(2, reverse_states=False)
q.add_gate("CSIGN", controls=[0], targets=[1])
q.png
berkeley()
q = QubitCircuit(2, reverse_states=False)
q.add_gate("BERKELEY", targets=[0, 1])
q.png
swapalpha(pi/2)
fredkin()
toffoli()
swap()
iswap()
sqrtiswap()
sqrtswap()
sqrtnot()
snot()
phasegate(pi/2)
globalphase(pi/2)
molmer_sorensen(pi/2)
qrot(pi/2, pi/4)
cnot(N=3)
q = QubitCircuit(3, reverse_states=False)
q.add_gate("CNOT", controls=[1], targets=[2])
q.png
cnot(N=3, control=2, target=0)
q = QubitCircuit(3, reverse_states=False)
q.add_gate("CNOT", controls=[0], targets=[2])
q.png
N = 2
qc0 = QubitCircuit(N)
qc0.add_gate("SWAP", [0, 1], None)
qc0.png
U_list0 = qc0.propagators()
U0 = gate_sequence_product(U_list0)
U0
qc1 = QubitCircuit(N)
qc1.add_gate("CNOT", 0, 1)
qc1.add_gate("CNOT", 1, 0)
qc1.add_gate("CNOT", 0, 1)
qc1.png
U_list1 = qc1.propagators()
U1 = gate_sequence_product(U_list1)
U1
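# Illustrative check: the three-CNOT decomposition reproduces the SWAP unitary,
# so U1 should match U0 computed above up to numerical precision.
print((U0 - U1).norm() < 1e-12)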
qc2 = qc0.resolve_gates("CNOT")
qc2.png
U_list2 = qc2.propagators()
U2 = gate_sequence_product(U_list2)
U2
qc1.add_gate("CSIGN", index=1)
qc1.png
qc3 = QubitCircuit(3)
qc3.add_gate("CNOT", 1, 0)
qc3.add_gate("RX", 0, None, pi/2, r"\pi/2")
qc3.add_gate("RY", 1, None, pi/2, r"\pi/2")
qc3.add_gate("RZ", 2, None, pi/2, r"\pi/2")
qc3.add_gate("ISWAP", [1, 2])
qc3.png
U3 = gate_sequence_product(qc3.propagators())
U3
qc4 = qc3.resolve_gates("CNOT")
qc4.png
U4 = gate_sequence_product(qc4.propagators())
U4
qc5 = qc3.resolve_gates("ISWAP")
qc5.png
U5 = gate_sequence_product(qc5.propagators())
U5
qc6 = qc3.resolve_gates(["ISWAP", "RX", "RY"])
qc6.png
U6 = gate_sequence_product(qc6.propagators())
U6
qc7 = qc3.resolve_gates(["CNOT", "RZ", "RX"])
qc7.png
U7 = gate_sequence_product(qc7.propagators())
U7
qc8 = QubitCircuit(3)
qc8.add_gate("CNOT", 2, 0)
qc8.png
U8 = gate_sequence_product(qc8.propagators())
U8
qc9 = qc8.adjacent_gates()
qc9.png
U9 = gate_sequence_product(qc9.propagators())
U9
qc10 = qc9.resolve_gates("CNOT")
qc10.png
U10 = gate_sequence_product(qc10.propagators())
U10
qc = QubitCircuit(1)
qc.add_gate("RX", targets=1)
qc.add_gate("RX", targets=1)
qc.add_gate("RY", targets=1, index=[1,0])
qc.gates
def user_gate1(arg_value):
# controlled rotation X
    mat = np.zeros((4, 4), dtype=complex)
mat[0, 0] = mat[1, 1] = 1.
mat[2:4, 2:4] = rx(arg_value)
return Qobj(mat, dims=[[2, 2], [2, 2]])
def user_gate2():
# S gate
mat = np.array([[1., 0],
[0., 1.j]])
return Qobj(mat, dims=[[2], [2]])
qc = QubitCircuit(2)
qc.user_gates = {"CTRLRX": user_gate1,
"S" : user_gate2}
# qubit 0 controls qubit 1
qc.add_gate("CTRLRX", targets=[0,1], arg_value=pi/2)
# qubit 1 controls qubit 0
qc.add_gate("CTRLRX", targets=[1,0], arg_value=pi/2)
# a gate can also be added using the Gate class
g_T = Gate("S", targets=[1])
qc.add_gate("S", targets=[1])
props = qc.propagators()
props[0] # qubit 0 controls qubit 1
props[1] # qubit 1 controls qubit 0
props[2] # S gate acts on qubit 1
from qutip.ipynbtools import version_table
version_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decorator with arguments
Step2: Decorator with or without arguments
Step4: Example from NetworkUnit
|
<ASSISTANT_TASK:>
Python Code:
import functools
def decorator_wo_args(original_function):
def wrapped_function(*args, **kwargs):
print('args:', args, 'kwargs:', kwargs)
out = original_function(*args, **kwargs)
return out
return wrapped_function
@decorator_wo_args
def func_dec_wo_args(func_arg='!'):
print('func_dec_wo_args', func_arg)
func_dec_wo_args()
def decorator_w_args(argument):
def _decorate(function):
# functools fixes function metadata (e.g. name)
@functools.wraps(function)
def wrapped_function(*args, **kwargs):
print('args:', args, 'kwargs:', kwargs)
print('decorator argument:', argument)
out = function(*args, **kwargs)
return out
return wrapped_function
return _decorate
@decorator_w_args(42)
def func_dec_w_args(func_arg='!'):
print('func_dec_w_args', func_arg)
func_dec_w_args(func_arg='?')
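# Because decorator_w_args uses functools.wraps, the decorated function keeps its
# own metadata; the plain decorator above does not:
print(func_dec_w_args.__name__)   # -> func_dec_w_args
print(func_dec_wo_args.__name__)  # -> wrapped_function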
def decorator_w_or_wo_args(original_function=None, optional_argument=None):
def _decorate(function):
# functools fixes function metadata (e.g. name)
@functools.wraps(function)
def wrapped_function(*args, **kwargs):
print('args:', args, 'kwargs:', kwargs)
print('decorator argument:', optional_argument)
out = function(*args, **kwargs)
return out
return wrapped_function
if original_function:
return _decorate(original_function)
else:
return _decorate
@decorator_w_or_wo_args
def func_dec_w_or_wo_args1(func_arg='!'):
print('func_dec_w_or_wo_args1', func_arg)
@decorator_w_or_wo_args(optional_argument=99)
# in this case it the decorator argument must have a keyword
def func_dec_w_or_wo_args2(func_arg='!'):
print('func_dec_w_or_wo_args2', func_arg)
func_dec_w_or_wo_args1('!?')
func_dec_w_or_wo_args2()
def use_prediction_cache(generate_prediction_func=None, hash_key=None):
    """Decorator for the `generate_prediction()` function of the tests, handles
    cached prediction loading, parameter update and prediction saving.
    """
def _decorate(function):
@functools.wraps(function)
def wrapper(self, model):
# Check if predictions were already calculated
prediction = self.get_prediction(model, key=hash_key)
# If any parameter was specified by the user in the generate_prediction
# function the predictions are recalculated
if prediction is None:
# Generate and save prediction
prediction = function(self, model)
self.set_prediction(model, prediction, key=hash_key)
return prediction
return wrapper
if generate_prediction_func:
return _decorate(generate_prediction_func)
else:
return _decorate
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we will import the data we saved previously using the pickle library.
Step2: Now we need to define the Keras model. Since we will be loading parameters from a pre-trained model, this needs to match exactly the definition from the previous lab section. The only difference is that we will comment out the dropout layer so that the model uses all the hidden neurons when doing the predictions.
Step3: Next we will load the parameters from the model we trained previously, and compile it with the same loss and optimizer function.
Step4: We also need to rewrite the sample() and generate() helper functions so that we can use them in our code
Step5: Now we can use the generate() function to generate text of any length based on our imported pre-trained model and a seed text of our choice. For best results, the length of the seed text should be the same as the length of the training sequences (100 in the previous lab section).
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import sys
import re
import pickle
pickle_file = '-basic_data.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
X = save['X']
y = save['y']
char_to_int = save['char_to_int']
int_to_char = save['int_to_char']
del save # hint to help gc free up memory
print('Training set', X.shape, y.shape)
# define the LSTM model
model = Sequential()
model.add(LSTM(128, return_sequences=False, input_shape=(X.shape[1], X.shape[2])))
# model.add(Dropout(0.50))
model.add(Dense(y.shape[1], activation='softmax'))
# load the parameters from the pretrained model
filename = "-basic_LSTM.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def generate(sentence, sample_length=50, diversity=0.35):
generated = sentence
sys.stdout.write(generated)
for i in range(sample_length):
x = np.zeros((1, X.shape[1], X.shape[2]))
for t, char in enumerate(sentence):
x[0, t, char_to_int[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = int_to_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
prediction_length = 500
seed_from_text = "america has shown that progress is possible. last year, income gains were larger for households at t"
seed_original = "and as people around the world began to hear the tale of the lowly colonists who overthrew an empire"
for seed in [seed_from_text, seed_original]:
generate(seed, prediction_length, .50)
print("-" * 20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import torch
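# Note: load_data() is a placeholder (not defined in this snippet); it is assumed to
# return a torch.optim.Optimizer whose learning rate is then overridden in place below.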
optim = load_data()
for param_group in optim.param_groups:
param_group['lr'] = 0.0005
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Adding single values
Step2: Adding multiple values at once
Step3: Adding two adaptive histograms together
|
<ASSISTANT_TASK:>
Python Code:
# Necessary import evil
import physt
from physt import h1, h2, histogramdd
import numpy as np
import matplotlib.pyplot as plt
# Create an empty histogram
h = h1(None, "fixed_width", bin_width=10, name="People height", axis_name="cm", adaptive=True)
h
# Add a first value
h.fill(157)
h.plot()
h
# Add a second value
h.fill(173)
h.plot()
# Add a few more values, including weights
h.fill(173, 2)
h.fill(186, 5)
h.fill(188, 3)
h.fill(193, 1)
h.plot(errors=True, show_stats=True);
ha = h1(None, "fixed_width", bin_width=10, adaptive=True)
ha.plot(show_stats=True);
# Beginning
ha.fill_n([10, 11, 34])
ha.plot();
# Add a distant value
ha.fill_n([234], weights=[10])
ha.plot(show_stats=True);
# Let's create a huge dataset
values = np.random.normal(130, 20, 100000)
%%time
# Add lots of values (no loop in Python)
hn = h1(None, "fixed_width", bin_width=10, adaptive=True)
hn.fill_n(values)
# ha.plot()
%%time
# Comparison with Python loop
hp = h1(None, "fixed_width", bin_width=10, adaptive=True)
for value in values:
hp.fill(value)
# Hopefully equal results
print("Equal?", hp == hn)
hp.plot(show_stats=True);
ha1 = h1(None, "fixed_width", bin_width=5, adaptive=True)
ha1.fill_n(np.random.normal(100, 10, 1000))
ha2 = h1(None, "fixed_width", bin_width=5, adaptive=True)
ha2.fill_n(np.random.normal(70, 10, 500))
ha = ha1 + ha2
fig, ax= plt.subplots()
ha1.plot(alpha=0.1, ax=ax, label="1", color="red")
ha2.plot(alpha=0.1, ax=ax, label="2")
ha.plot("scatter", label="sum", ax=ax, errors=True)
ax.legend(loc=2); # TODO? Why don't we show the sum???
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: the trend has a simple structure that is easily explained by inflation
Step2: Both visually and using the Dickey-Fuller test we can see that the series is still non-stationary, although the amplitude of the seasonal fluctuations has decreased considerably. Let us try seasonal differencing; we then perform an STL decomposition of the differenced series and check for stationarity
Step3: The Dickey-Fuller test rejects the hypothesis of non-stationarity at the 0.05 significance level, but we still see a trend. Let us also add ordinary differencing
Step4: The hypothesis of non-stationarity is confidently rejected, and visually the series looks better. The trend component shows no systematic behaviour - the curve oscillates around 0.
Step5: there is no significant seasonal lag different from zero => Q = 0
Step6: we iterate over all possible values of the parameters P, p, Q, q
Step7: The best model (minimal aic = 38.9, parameters = (1, 0, 1, 0))
Step8: Residuals
Step9: The residuals are unbiased (confirmed by Student's t-test), stationary (confirmed by the Dickey-Fuller test and visually), and not autocorrelated (confirmed by the Ljung-Box test and the correlogram).
Step10: Forecast
|
<ASSISTANT_TASK:>
Python Code:
# Setup assumed from earlier cells of the notebook (the `wages` DataFrame with the
# WAG_C_M column is loaded before this point); imports added so the cells below run.
%pylab inline
import warnings
import datetime
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import stats
from itertools import product
from dateutil.relativedelta import relativedelta
plt.figure(figsize(15,10))
sm.tsa.seasonal_decompose(wages.WAG_C_M).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wages.WAG_C_M)[1])
wages['wages_box'], lmbda = stats.boxcox(wages.WAG_C_M)
plt.figure(figsize(15,7))
wages.wages_box.plot()
plt.ylabel('Transformed wages')
print("Оптимальный параметр преобразования Бокса-Кокса: %f" % lmbda)
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wages.wages_box)[1])
wages['wages_box_diff'] = wages.wages_box - wages.wages_box.shift(12)
plt.figure(figsize(15,10))
sm.tsa.seasonal_decompose(wages.wages_box_diff[12:]).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wages.wages_box_diff[12:])[1])
wages['wages_box_diff2'] = wages.wages_box_diff - wages.wages_box_diff.shift(1)
plt.figure(figsize(15,10))
sm.tsa.seasonal_decompose(wages.wages_box_diff2[13:]).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wages.wages_box_diff2[13:])[1])
plt.figure(figsize(15,8))
ax = plt.subplot(211)
sm.graphics.tsa.plot_acf(wages.wages_box_diff2[13:].values.squeeze(), lags=48, ax=ax)
pylab.show()
ax = plt.subplot(212)
sm.graphics.tsa.plot_pacf(wages.wages_box_diff2[13:].values.squeeze(), lags=48, ax=ax)
pylab.show()
ps = range(0, 2)
d=1
qs = range(0, 2)
Ps = range(0, 2)
D=1
Qs = range(0, 1)
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
%%time
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
    # try/except is needed because the model fails to fit for some parameter sets
try:
model=sm.tsa.statespace.SARIMAX(wages.wages_box, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12)).fit(disp=-1)
    # print the parameter sets for which the model does not fit and move on to the next one
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
    # keep the best model, its AIC and its parameters
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
warnings.filterwarnings('default')
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
plt.figure(figsize(15,8))
plt.subplot(211)
best_model.resid[13:].plot()
plt.ylabel(u'Residuals')
ax = plt.subplot(212)
sm.graphics.tsa.plot_acf(best_model.resid[13:].values.squeeze(), lags=48, ax=ax)
print("Критерий Стьюдента: p=%f" % stats.ttest_1samp(best_model.resid[13:], 0)[1])
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(best_model.resid[13:])[1])
def invboxcox(y,lmbda):
if lmbda == 0:
return(np.exp(y))
else:
return(np.exp(np.log(lmbda*y+1)/lmbda))
wages['model'] = invboxcox(best_model.fittedvalues, lmbda)
plt.figure(figsize(15,7))
wages.WAG_C_M.plot()
wages.model[13:].plot(color='r')
plt.ylabel('wages')
pylab.show()
wages2 = wages[['WAG_C_M']]
date_list = [datetime.datetime.strptime("2017-07-01", "%Y-%m-%d") + relativedelta(months=x) for x in range(0,24)]
future = pd.DataFrame(index=date_list, columns= wages2.columns)
wages2 = pd.concat([wages2, future])
wages2['forecast'] = invboxcox(best_model.predict(start=294, end=317), lmbda)
plt.figure(figsize(15,7))
wages2.WAG_C_M.plot()
wages2.forecast.plot(color='r')
plt.ylabel('wages')
pylab.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
# Set self.activation_function (used for hidden layer) to sigmoid function
self.activation_function = lambda x : 1 / (1 + np.exp(-x))
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# Hidden layer
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# Output layer
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
### Backward pass ##
# Output error
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# Calculate the hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output, error)
# Backpropagated error terms
output_error_term = error # d/dx(f(x)=x) = d/dx(x) = 1
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs) # Suggested simplification
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:,None]
# Update the weights
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# Hidden layer
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# Output layer
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 1250
learning_rate = 0.1
hidden_nodes = 15
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
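# Illustrative follow-up: overall test-set error in original (unscaled) count units.
test_mse = MSE(predictions[0], (test_targets['cnt']*std + mean).values)
print("Test MSE (unscaled counts):", test_mse)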
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numeric widgets
Step2: Sliders can also be displayed vertically.
Step3: FloatProgress
Step4: BoundedFloatText
Step5: FloatText
Step6: Boolean widgets
Step7: Checkbox
Step8: Valid
Step9: Selection widgets
Step10: The following is also valid
Step11: RadioButtons
Step12: Select
Step13: ToggleButtons
Step14: SelectMultiple
Step15: String widgets
Step16: Textarea
Step17: Latex
Step18: HTML
Step19: Button
|
<ASSISTANT_TASK:>
Python Code:
import ipywidgets as widgets
# Show all available widgets!
widgets.Widget.widget_types.values()
widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Test:',
)
widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Test',
orientation='vertical',
)
widgets.FloatProgress(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Loading:',
)
widgets.BoundedFloatText(
value=7.5,
min=5.0,
max=10.0,
description='Text:',
)
widgets.FloatText(
value=7.5,
description='Any:',
)
widgets.ToggleButton(
description='Click me',
value=False,
)
widgets.Checkbox(
description='Check me',
value=True,
)
widgets.Valid(
value=True,
)
from IPython.display import display
w = widgets.Dropdown(
options=['1', '2', '3'],
value='2',
description='Number:',
)
display(w)
# Show value
w.value
w = widgets.Dropdown(
options={'One': 1, 'Two': 2, 'Three': 3},
value=2,
description='Number:')
display(w)
w.value
widgets.RadioButtons(
description='Pizza topping:',
options=['pepperoni', 'pineapple', 'anchovies'],
)
widgets.Select(
description='OS:',
options=['Linux', 'Windows', 'OSX'],
)
widgets.ToggleButtons(
description='Speed:',
options=['Slow', 'Regular', 'Fast'],
)
w = widgets.SelectMultiple(
description="Fruits",
options=['Apples', 'Oranges', 'Pears'])
display(w)
w.value
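# Widgets can also push value changes back into Python via observe (illustrative):
def on_change(change):
    print("new selection:", change["new"])
w.observe(on_change, names="value")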
widgets.Text(
description='String:',
value='Hello World',
)
widgets.Textarea(
description='String:',
value='Hello World',
)
widgets.Latex(
value="$$\\frac{n!}{k!(n-k)!}$$",
)
widgets.HTML(
value="Hello <b>World</b>"
)
widgets.Button(description='Click me')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We plot both diameters on the same graph
Step2: We show a plot of the mean of the samples
Step3: Comparison of Diameter X against Diameter Y to inspect the filament ratio
Step4: Data filtering
Step5: Plot of the X/Y ratio
Step6: We analyse the ratio data
Step7: Quality limits
|
<ASSISTANT_TASK:>
Python Code:
#Import the libraries used
import numpy as np
import pandas as pd
import seaborn as sns
#Show the versions used for each library
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
#Open the csv file with the sample data
datos = pd.read_csv('BQ.CSV')
%pylab inline
#Show a summary of the data obtained
datos.describe()
#Store in a list the file columns we are going to work with
columns = ['Diametro X [mm]', 'Diametro Y [mm]', 'RPM']
#Show the information obtained from the test in several plots
datos[columns].plot(subplots=True, figsize=(20,20))
datos.ix[:, "Diametro X [mm]":"Diametro Y [mm]"].plot(figsize=(16,3))
datos.ix[:, "Diametro X [mm]":"Diametro Y [mm]"].boxplot(return_type='axes')
pd.rolling_mean(datos[columns], 50).plot(subplots=True, figsize=(12,12))
plt.scatter(x=datos['Diametro X [mm]'], y=datos['Diametro Y [mm]'], marker='.')
datos_filtrados = datos[(datos['Diametro X [mm]'] >= 0.9) & (datos['Diametro Y [mm]'] >= 0.9)]
plt.scatter(x=datos_filtrados['Diametro X [mm]'], y=datos_filtrados['Diametro Y [mm]'], marker='.')
ratio = datos_filtrados['Diametro X [mm]']/datos_filtrados['Diametro Y [mm]']
ratio.describe()
rolling_mean = pd.rolling_mean(ratio, 50)
rolling_std = pd.rolling_std(ratio, 50)
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
Th_u = 1.85
Th_d = 1.65
data_violations = datos[(datos['Diametro X [mm]'] > Th_u) | (datos['Diametro X [mm]'] < Th_d) |
(datos['Diametro Y [mm]'] > Th_u) | (datos['Diametro Y [mm]'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
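# Illustrative follow-up: fraction of samples falling outside the quality limits.
print("Samples out of spec: %.2f%%" % (100.0 * len(data_violations) / len(datos)))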
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A simple classification model using Keras with Cloud TPUs
Step2: Resolve TPU Address
Step3: FLAGS used as model params
Step5: Download training input data and define prediction input & output
Step6: Define a Keras model (2 hidden layers with 10 neurons in each)
Step7: Compiling the model with a distribution strategy
Step8: Train the model on TPU
Step9: Evaluation of the model
Step10: Save the model
Step11: Prediction
Step12: Prediction on TPU
Step13: Prediction on CPU
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
import os
import pandas as pd
import pprint
import tensorflow as tf
import time
import numpy as np
from tensorflow import keras
print(tf.__version__)
import distutils
if distutils.version.LooseVersion(tf.__version__) < '1.14':
raise Exception('This notebook is compatible with TensorFlow 1.14 or higher, for TensorFlow 1.13 or lower please use the previous version at https://github.com/tensorflow/tpu/blob/r1.13/tools/colab/classification_iris_data_with_keras.ipynb')
use_tpu = True #@param {type:"boolean"}
if use_tpu:
assert 'COLAB_TPU_ADDR' in os.environ, 'Missing TPU; did you request a TPU in Notebook Settings?'
if 'COLAB_TPU_ADDR' in os.environ:
TF_MASTER = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
else:
TF_MASTER=''
# Model specific parameters
# TPU address
tpu_address = TF_MASTER
# Number of epochs
epochs = 50
# Number of steps_per_epoch
steps_per_epoch = 5
# NOTE: Total number of training steps = Number of epochs * Number of steps_per_epochs
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
PREDICTION_INPUT_DATA = {
'SepalLength': [6.9, 5.1, 5.9, 6.0, 5.5, 6.2, 5.5, 6.3],
'SepalWidth': [3.1, 3.3, 3.0, 3.4, 2.5, 2.9, 4.2, 2.8],
'PetalLength': [5.4, 1.7, 4.2, 4.5, 4.0, 4.3, 1.4, 5.1],
'PetalWidth': [2.1, 0.5, 1.5, 1.6, 1.3, 1.3, 0.2, 1.5],
}
PREDICTION_OUTPUT_DATA = ['Virginica', 'Setosa', 'Versicolor', 'Versicolor', 'Versicolor', 'Versicolor', 'Setosa', 'Virginica']
def maybe_download():
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
return train_path, test_path
def load_data(y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y)."""
train_path, test_path = maybe_download()
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0, dtype={'SepalLength': pd.np.float32,
'SepalWidth': pd.np.float32, 'PetalLength': pd.np.float32, 'PetalWidth': pd.np.float32, 'Species': pd.np.int32})
train_x, train_y = train, train.pop(y_name)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0, dtype={'SepalLength': pd.np.float32,
'SepalWidth': pd.np.float32, 'PetalLength': pd.np.float32, 'PetalWidth': pd.np.float32, 'Species': pd.np.int32})
test_x, test_y = test, test.pop(y_name)
return (train_x, train_y), (test_x, test_y)
def get_model():
return keras.Sequential([
keras.layers.Dense(10, input_shape=(4,), activation=tf.nn.relu, name = "Dense_1"),
keras.layers.Dense(10, activation=tf.nn.relu, name = "Dense_2"),
keras.layers.Dense(3, activation=None, name = "logits"),
keras.layers.Dense(3, activation=tf.nn.softmax, name = "softmax")
])
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(TF_MASTER)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
with strategy.scope():
model = get_model()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=['accuracy'])
model.summary()
# Fetch the data
(train_x, train_y), (test_x, test_y) = load_data()
# Train the model
model.fit(
train_x.values, train_y.values,
steps_per_epoch = steps_per_epoch,
epochs=epochs,
)
model.evaluate(test_x.values, test_y.values,
batch_size=8)
model.save_weights('./DNN_TPU_1024.h5', overwrite=True)
COLUMNS_NAME=['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth']
data = pd.DataFrame(PREDICTION_INPUT_DATA, columns=COLUMNS_NAME)
print(data)
predictions = model.predict(data.values.astype(np.float32))
template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
for pred_dict, expec in zip(predictions, PREDICTION_OUTPUT_DATA):
class_index = np.argmax(pred_dict)
class_probability = np.max(pred_dict)
print(template.format(SPECIES[class_index], 100*class_probability, expec))
cpu_model = get_model()
cpu_model.load_weights('./DNN_TPU_1024.h5')
cpu_predictions = cpu_model.predict(data)
template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
for pred_dict, expec in zip(cpu_predictions, PREDICTION_OUTPUT_DATA):
class_index = np.argmax(pred_dict)
class_probability = np.max(pred_dict)
print(template.format(SPECIES[class_index], 100*class_probability, expec))
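# The reloaded CPU model uses the same saved weights, so its outputs should closely
# match the TPU model's (small numerical differences are expected):
print("max |TPU - CPU| prediction difference:", np.abs(predictions - cpu_predictions).max())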
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id = "config"></a>
Step2: <a id = "params"></a>
Step3: <a id = "pipeline"></a>
Step4: <a id = "results"></a>
Step5: <a id = "qc"></a>
Step6: <a id = "alignment"></a>
Step7: <a id = "clonal"></a>
Step8: <a id = "autocorr"></a>
Step9: <a id = "homer"></a>
Step10: Number of Peaks Per Sample
Step11: IP Efficiency
Step12: <a id = "annot"></a>
Step13: Annotation
Step14: <a id = "kegg"></a>
Step15: <a id = "motif"></a>
Step16: <a id = "promoters"></a>
Step17: <a id = "genes"></a>
Step18: <a id = "venn2"></a>
Step19: <a id = "browse"></a>
|
<ASSISTANT_TASK:>
Python Code:
#Omics Pipe Overview
from IPython.display import Image
Image(filename='/data/chip/2606129465-omics_pipe_overview.png', width=500, height=100)
#Import Omics pipe and module dependencies
import yaml
from omics_pipe.parameters.default_parameters import default_parameters
from ruffus import *
import sys
import os
import time
import datetime
import drmaa
import csv
from omics_pipe.utils import *
from IPython.display import IFrame
import pandas
import glob
import os
import matplotlib.pyplot as plt
from matplotlib_venn import venn2,venn3, venn3_circles
%matplotlib inline
#%matplotlib notebook
import qgrid
qgrid.nbinstall(overwrite=True)
qgrid.set_defaults(remote_js=True, precision=4)
from IPython.display import HTML
import mygene
#Download scripts from https://github.com/gdavidson/ChIPseq_tools
sys.path.append('/data/chip/ChIPseq_tools-master') #append path to downloaded scripts
import compareGeneLists as compare
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
#Change top directory to locate result files
os.chdir("/data/chip")
###Customize parameters: Specify sample names and conditions
sample_names = ["1_2percent_input_R1",
"1_h3k4me3_R1",
"1_h3k9ac_R1",
"1_h3k9me3_R1",
"2_2percent_input_R1",
"2_h3k4me3_R1",
"2_h3k9ac_R1",
"2_h3k9me3_R1",
"3_2percent_input_R1",
"3_h3k4me3_R1",
"3_h3k9ac_R1",
"3_h3k9me3_R1",
"4_2percent_input_R1",
"4_h3k4me3_R1",
"4_h3k9ac_R1",
"4_h3k9me3_R1",
"5_2percent_input_R1",
"5_h3k4me3_R1",
"5_h3k9ac_R1",
"5_h3k9me3_R1",
"6_2percent_input_R1",
"6_h3k4me3_R1",
"6_h3k9ac_R1",
"6_h3k9me3_R1"]
condition = ["Control",
"H3K4me3",
"H3K4ac",
"H3K9me3",
"Control",
"H3K4me3",
"H3K4ac",
"H3K9me3",
"Control",
"H3K4me3",
"H3K4ac",
"H3K9me3",
"Control",
"H3K4me3",
"H3K4ac",
"H3K9me3",
"Control",
"H3K4me3",
"H3K4ac",
"H3K9me3",
"Control",
"H3K4me3",
"H3K4ac",
"H3K9me3"
]
lib_type = ["single_end"]*len(condition)
#Update Metadata File
meta = {'Sample': pandas.Series(sample_names), 'condition': pandas.Series(condition) , 'libType': pandas.Series(lib_type)}
meta_df = pandas.DataFrame(data = meta)
deseq_meta_new = "/data/chip/new_meta.csv"
meta_df.to_csv(deseq_meta_new,index=False)
print meta_df
#Define pairs for differential peak calling (ChIP-input or Treatment-Control)
pairs = '1_h3k4me3_R1-1_2percent_input_R1 1_h3k9ac_R1-1_2percent_input_R1 1_h3k9me3_R1-1_2percent_input_R1 2_h3k4me3_R1-2_2percent_input_R1 2_h3k9ac_R1-2_2percent_input_R1 2_h3k9me3_R1-2_2percent_input_R1 3_h3k4me3_R1-3_2percent_input_R1 3_h3k9ac_R1-3_2percent_input_R1 3_h3k9me3_R1-3_2percent_input_R1 4_h3k4me3_R1-4_2percent_input_R1 4_h3k9ac_R1-4_2percent_input_R1 4_h3k9me3_R1-4_2percent_input_R1 5_h3k4me3_R1-5_2percent_input_R1 5_h3k9ac_R1-5_2percent_input_R1 5_h3k9me3_R1-5_2percent_input_R1 6_h3k4me3_R1-6_2percent_input_R1 6_h3k9ac_R1-6_2percent_input_R1 6_h3k9me3_R1-6_2percent_input_R1 6_2percent_input_R1-4_2percent_input_R1 6_h3k4me3_R1-4_h3k4me3_R1 6_h3k9ac_R1-4_h3k9ac_R1 6_h3k9me3_R1-4_h3k9me3_R1 5_2percent_input_R1-4_2percent_input_R1 5_h3k4me3_R1-4_h3k4me3_R1 5_h3k9ac_R1-4_h3k9ac_R1 5_h3k9me3_R1-4_h3k9me3_R1 6_2percent_input_R1-3_2percent_input_R1 6_h3k4me3_R1-3_h3k4me3_R1 6_h3k9ac_R1-3_h3k9ac_R1 6_h3k9me3_R1-3_h3k9me3_R1 5_2percent_input_R1-3_2percent_input_R1 5_h3k4me3_R1-3_h3k4me3_R1 5_h3k9ac_R1-3_h3k9ac_R1 5_h3k9me3_R1-3_h3k9me3_R1 1_2percent_input_R1-3_2percent_input_R1 1_h3k4me3_R1-3_h3k4me3_R1 1_h3k9ac_R1-3_h3k9ac_R1 1_h3k9me3_R1-3_h3k9me3_R1 2_2percent_input_R1-3_2percent_input_R1 2_h3k4me3_R1-3_h3k4me3_R1 2_h3k9ac_R1-3_h3k9ac_R1 2_h3k9me3_R1-3_h3k9me3_R1 1_2percent_input_R1-4_2percent_input_R1 1_h3k4me3_R1-4_h3k4me3_R1 1_h3k9ac_R1-4_h3k9ac_R1 1_h3k9me3_R1-4_h3k9me3_R1 2_2percent_input_R1-4_2percent_input_R1 2_h3k4me3_R1-4_h3k4me3_R1 2_h3k9ac_R1-4_h3k9ac_R1 2_h3k9me3_R1-4_h3k9me3_R1'
#Define pairs of peaks to compare
pairs_to_compare = ['5_h3k4me3_R1_vs_5_2percent_input_R1-3_h3k4me3_R1_vs_3_2percent_input_R1','5_h3k9ac_R1_vs_5_2percent_input_R1-3_h3k9ac_R1_vs_3_2percent_input_R1','5_h3k9me3_R1_vs_5_2percent_input_R1-3_h3k9me3_R1_vs_3_2percent_input_R1','5_h3k4me3_R1_vs_5_2percent_input_R1-4_h3k4me3_R1_vs_4_2percent_input_R1','5_h3k9ac_R1_vs_5_2percent_input_R1-4_h3k9ac_R1_vs_4_2percent_input_R1','5_h3k9me3_R1_vs_5_2percent_input_R1-4_h3k9me3_R1_vs_4_2percent_input_R1','6_h3k4me3_R1_vs_6_2percent_input_R1-3_h3k4me3_R1_vs_3_2percent_input_R1','6_h3k9ac_R1_vs_6_2percent_input_R1-3_h3k9ac_R1_vs_3_2percent_input_R1','6_h3k9me3_R1_vs_6_2percent_input_R1-3_h3k9me3_R1_vs_3_2percent_input_R1','6_h3k4me3_R1_vs_6_2percent_input_R1-4_h3k4me3_R1_vs_4_2percent_input_R1','6_h3k9ac_R1_vs_6_2percent_input_R1-4_h3k9ac_R1_vs_4_2percent_input_R1','6_h3k9me3_R1_vs_6_2percent_input_R1-4_h3k9me3_R1_vs_4_2percent_input_R1','1_h3k4me3_R1_vs_1_2percent_input_R1-3_h3k4me3_R1_vs_3_2percent_input_R1','1_h3k9ac_R1_vs_1_2percent_input_R1-3_h3k9ac_R1_vs_3_2percent_input_R1','1_h3k9me3_R1_vs_1_2percent_input_R1-3_h3k9me3_R1_vs_3_2percent_input_R1','2_h3k4me3_R1_vs_2_2percent_input_R1-4_h3k4me3_R1_vs_4_2percent_input_R1','2_h3k9ac_R1_vs_2_2percent_input_R1-4_h3k9ac_R1_vs_4_2percent_input_R1','2_h3k9me3_R1_vs_2_2percent_input_R1-4_h3k9me3_R1_vs_4_2percent_input_R1','1_h3k4me3_R1_vs_1_2percent_input_R1-3_h3k4me3_R1_vs_3_2percent_input_R1','1_h3k9ac_R1_vs_1_2percent_input_R1-3_h3k9ac_R1_vs_3_2percent_input_R1','1_h3k9me3_R1_vs_1_2percent_input_R1-3_h3k9me3_R1_vs_3_2percent_input_R1','2_h3k4me3_R1_vs_2_2percent_input_R1-4_h3k4me3_R1_vs_4_2percent_input_R1','2_h3k9ac_R1_vs_2_2percent_input_R1-4_h3k9ac_R1_vs_4_2percent_input_R1','2_h3k9me3_R1_vs_2_2percent_input_R1-4_h3k9me3_R1_vs_4_2percent_input_R1','6_h3k4me3_R1_vs_4_h3k4me3_R1-6_h3k9me3_R1_vs_4_h3k9me3_R1','5_h3k4me3_R1_vs_4_h3k4me3_R1-5_h3k9me3_R1_vs_4_h3k9me3_R1','6_h3k4me3_R1_vs_3_h3k4me3_R1-6_h3k9me3_R1_vs_3_h3k9me3_R1','5_h3k4me3_R1_vs_3_h3k4me3_R1-5_h3k9me3_R1_vs_3_h3k9me3_R1','6_h3k9ac_R1_vs_4_h3k9ac_R1-6_h3k9me3_R1_vs_4_h3k9me3_R1','5_h3k9ac_R1_vs_4_h3k9ac_R1-5_h3k9me3_R1_vs_4_h3k9me3_R1','6_h3k9ac_R1_vs_3_h3k9ac_R1-6_h3k9me3_R1_vs_3_h3k9me3_R1','5_h3k9ac_R1_vs_3_h3k9ac_R1-5_h3k9me3_R1_vs_3_h3k9me3_R1','6_h3k4me3_R1_vs_4_h3k4me3_R1-6_h3k9ac_R1_vs_4_h3k9ac_R1','5_h3k4me3_R1_vs_4_h3k4me3_R1-5_h3k9ac_R1_vs_4_h3k9ac_R1','6_h3k4me3_R1_vs_3_h3k4me3_R1-6_h3k9ac_R1_vs_3_h3k9ac_R1','5_h3k4me3_R1_vs_3_h3k4me3_R1-5_h3k9ac_R1_vs_3_h3k9ac_R1']
###Update parameters, such as GENOME, GTF_FILE, paths, etc
parameters = "/root/src/omics-pipe/tests/test_params_ChIPseq_HOMER_AWS.yaml"
stream = file(parameters, 'r')
params = yaml.load(stream)
params.update({"SAMPLE_LIST": sample_names})
params.update({"PAIR_LIST": pairs})
params.update({"R_VERSION": '3.2.3'})
params.update({"GENOME": '/database/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa'})
params.update({"REF_GENES": '/database/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf'})
params.update({"RAW_DATA_DIR": '/data/data'})
params.update({"TEMP_DIR": '/data/data/tmp'})
params.update({"PIPE_MULTIPROCESS": 100})
params.update({"STAR_VERSION": '2.4.5a'})
params.update({"PARAMS_FILE": '/data/results/updated_params.yaml'})
params.update({"LOG_PATH": ':/data/results/logs'})
params.update({"QC_PATH": "/data/results/QC"})
params.update({"FLAG_PATH": "/data/results/flags"})
params.update({"BOWTIE_RESULTS": "/data/results/bowtie"})
params.update({"HOMER_RESULTS": "/data/results/homer"})
params.update({"BOWTIE_INDEX": "/data/database/Homo_sapiens/UCSC/hg19/Sequence/BowtieIndex/genome"})
params.update({"ENDS": 'SE'})
params.update({"HOMER_VERSION": '4.6'})
params.update({"TRIMMED_DATA_PATH": "/data/results/trimmed"})
params.update({"HOMER_TRIM_OPTIONS": "-3 GATCGGAAGAGCACACGTCT -mis 1 -minMatchLength 6 -min 45"})
params.update({"HOMER_PEAKS_OPTIONS": "-o auto -region -size 1000 -minDist 2500"})
params.update({"HOMER_MOTIFS_OPTIONS": "-start -1000 -end 100 -len 8,10 -p 4"})
params.update({"HOMER_ANNOTATE_OPTIONS":""})
params.update({"HOMER_GENOME": "hg19"})
#update params
default_parameters.update(params)
#write yaml file
stream = file('updated_params.yaml', 'w')
yaml.dump(params,stream)
p = Bunch(default_parameters)
#View Parameters
print "Run Parameters: \n" + str(params)
### Omics Pipe Pipelines
from IPython.display import Image
Image(filename='/data/chip/2365251253-omics_pipe_pipelines_20140402.png', width=700, height=250)
###Run Omics Pipe from the command line
!omics_pipe ChIPseq_HOMER /data/chip/updated_params.yaml
#Change top directory to locate result files
os.chdir("/data/chip")
#Display Omics Pipe Pipeline Run Status
#pipeline = './flags/pipeline_combined_%s.pdf' % date
pipeline = './flags/pipeline_combined_2016-05-16 17:41.pdf'
IFrame(pipeline, width=700, height=500)
###Summarize FastQC raw data QC results per sample
results_dir = './QC/'
# Below is the complete list of labels in the summary file
summary_labels = ["Basic Statistics", "Per base sequence quality", "Per tile sequence quality",
"Per sequence quality scores", "Per base sequence content", "Per sequence GC content",
"Per base N content", "Sequence Length Distribution", "Sequence Duplication Levels",
"Overrepresented sequences", "Adapter Content", "Kmer Content"]
# Below is the list I anticipate caring about; I leave the full list above in case it turns out later
# I anticipated wrong and need to update this one.
labels_of_interest = ["Basic Statistics", "Per base sequence quality"]
# Look for each file named summary.txt in each subdirectory named *_fastqc in the results directory
summary_wildpath = os.path.join(results_dir, '*/*_fastqc', "summary.txt")
summary_filepaths = [x for x in glob.glob(summary_wildpath)]
#print os.getcwd()
# Examine each of these files to find lines starting with "FAIL" or "WARN"
for curr_summary_path in summary_filepaths:
has_error = False
#print(divider)
with open(curr_summary_path, 'r') as f:
for line in f:
if line.startswith("FAIL") or line.startswith("WARN"):
fields = line.split("\t")
if not has_error:
print(fields[2].strip() + ": PASS") # the file name
has_error = True
if fields[1] in labels_of_interest:
print(fields[0] + "\t" + fields[1])
#Display QC results for individual samples
sample = "6_h3k9me3_R1"
name = '/data/chip/QC/%s_fastqc/fastqc_report.html' % (sample)
#name = './QC/%s/%s_fastqc/fastqc_report.html' % (sample,sample)
IFrame(name, width=1000, height=600)
#Run samstat to produce summary statistics from Bowtie output
!samstat ./bowtie/*/*.bam
##Summarize Alignment QC Statistics
import sys
from io import StringIO
align_dir = './bowtie/'
# Look for each file named summary.txt in each subdirectory named *_fastqc in the results directory
summary_wildpath = os.path.join(align_dir, '*/', "*.bam.samstat.html")
#summary_wildpath = os.path.join(star_dir, "*Log.final.out")
summary_filepaths = [x for x in glob.glob(summary_wildpath)]
#print summary_filepaths
alignment_stats = pandas.DataFrame()
for curr_summary_path in summary_filepaths:
#with open(curr_summary_path, 'r') as f:
filename = curr_summary_path.replace("./bowtie/","")
filename2 = filename.replace(".bam.samstat.html","")
filename3 = filename2.replace("/*","")
dfs = pandas.read_html(curr_summary_path, header =0)
df = dfs[0]
raw_reads1 = df["Number"]
raw_reads = raw_reads1[6]
aligned_reads1 = df["Number"]
aligned_reads = aligned_reads1[0]
percent_aligned1 = df["Percentage"]
percent_aligned = percent_aligned1[0]
d = {"Sample": pandas.Series(filename3), "Raw_Reads": pandas.Series(float(raw_reads)),
"Aligned_Reads": pandas.Series(float(aligned_reads)),
"Percent_Uniquely_Aligned": pandas.Series(percent_aligned)}
p = pandas.DataFrame(data=d)
alignment_stats = alignment_stats.append(p)
#print alignment_stats
alignment_stats.to_csv("alignment_stats_summary.csv",index=False)
#View interactive table
qgrid.show_grid(alignment_stats, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#Barplot of number of aligned reads per sample
plt.figure(figsize=(10,10))
ax = plt.subplot(111)
alignment_stats.plot(ax=ax, kind='barh', title='# of Reads')
ax.yaxis.set_visible(False)  # hide the default y tick labels; sample names are drawn manually below
ax.axvline(x=10000000, linewidth=2, color='Red', zorder=0)
#plt.xlabel('# Aligned Reads',fontsize=16)
for i, x in enumerate(alignment_stats.Sample):
ax.text(0, i + 0, x, ha='right', va= "bottom", fontsize='medium')
plt.savefig('./alignment_stats_%s' %date ,dpi=300) # save figure
###Flag samples with poor alignment or low numbers of reads
df = alignment_stats
failed_samples = df.loc[(df.Aligned_Reads < 10000000) | (df.Percent_Uniquely_Aligned < 40), ['Sample','Raw_Reads', 'Aligned_Reads', 'Percent_Uniquely_Aligned']]
print failed_samples
#View interactive table
#qgrid.show_grid(failed_samples, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#View Alignment Statistics for failed samples
for failed in failed_samples["Sample"]:
#fname = "/data/results/star/%s/Log.final.out" % failed
fname = "./bowtie/%s.bam.samstat.html" % failed
print fname
IFrame(fname, width=1000, height=600)
###Samples that passed QC for alignment
passed_samples = df.loc[(df.Aligned_Reads > 10000000) & (df.Percent_Uniquely_Aligned > 40), ['Sample','Raw_Reads', 'Aligned_Reads', 'Percent_Uniquely_Aligned']]
print "Number of samples that passed alignment QC = " + str(len(passed_samples))
#View interactive table
#qgrid.show_grid(passed_samples, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#View Alignment Statistics for passed samples
for passed in passed_samples["Sample"]:
#fname = "/data/results/star/%s/Log.final.out" % passed
fname = "./bowtie/%s.bam.samstat.html" % passed
print passed
IFrame(fname, width=1000, height=600)
for sample in sample_names:
fi = "./%s/tagCountDistribution.txt" % sample
counts1 = pandas.read_csv(fi, sep="\t")
counts = counts1.head(10)
    # bar plot of the tag count distribution: first column = tags per position, second = fraction of positions
    counts.plot.bar(x=counts.columns[0], y=counts.columns[1], legend=False).set_title(sample)
plt.savefig('./clonal_distribution_plot_%s' %sample ,dpi=300) # save figure
for sample in sample_names:
fi = "./%s/tagAutocorrelation.txt" % sample
tags = pandas.read_csv(fi, sep="\t")
#Distance in bp(Fragment Length Estimate: 164)(Peak Width Estimate: 164) Same Strand (+ for Watson strand, - for Crick) Opposite Strand
tags.columns = ['Relative_Distance_Between_Reads(bp)', 'Same_Strand', 'Opposite_Strand']
ax1 = tags.plot(x='Relative_Distance_Between_Reads(bp)', y=['Same_Strand','Opposite_Strand'])
ax1.set_ylim(10000,250000)
ax1.set_xlim(-1000,1000)
ax1.set_title(sample)
plt.savefig('./autocorrelation_plot_%s' %sample ,dpi=300) # save figure
pairs1 = pairs.replace(" ", ",")
pairs2 = pairs1.replace("-", "_vs_")
pairs3 = pairs2.split(",")
peak_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s/regions.txt" % pair
with open(fname, 'r') as fin:
head = [next(fin) for x in xrange(40)]
df = pandas.DataFrame(head)
df.columns=["col"]
df['col'] = df['col'].str.replace('\n','')
df = pandas.DataFrame(df.col.str.split('=',1).tolist(),columns = ['sample',pair])
df_items = df[['sample']]
df_values = df[[pair]]
peak_stats = pandas.concat([peak_stats, df_values],axis=1)
#print pair
peak_stats = pandas.concat([df_items,peak_stats],axis=1)
peak_stats =peak_stats.transpose()
peak_stats =peak_stats.dropna(axis=1)
peak_stats.columns = peak_stats.iloc[0]
peak_stats = peak_stats[1:]
peak_stats.to_csv("peak_stats_summary.csv",index=False)
#View interactive table
qgrid.show_grid(peak_stats, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#Display peak summary graphs
#Barplot of number of peaks per sample
num_peaks = peak_stats.iloc[:,[1]]
num_peaks.columns = ["Number of Peaks"]
num_peaks = num_peaks.apply(pandas.to_numeric, errors='coerce')
num_peaks = num_peaks.sort_values(["Number of Peaks"], axis=0, ascending=False)
num_peaks.plot.bar(figsize=(15, 5))
plt.savefig('./peaks_summary.png' ,dpi=300) # save figure
#Display IP efficiency summary graphs, with horizontal line at y=1
IP = peak_stats.iloc[:,[8]]
IP.columns = ["IP_Efficiency"]
IP['IP_Efficiency'] = IP['IP_Efficiency'].replace('%','',regex=True).astype('float')
IP =IP.sort_values(['IP_Efficiency'],axis=0,ascending=False)
IP.plot.bar(figsize=(15, 5))
plt.axhline(y=1, color = "red", linewidth = 2)
plt.savefig('./ipefficiency_summary.png' ,dpi=300) # save figure
#Summarize annotation stats
annot_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s/regions.annotate.txt" % pair
fi = pandas.read_csv(fname, sep="\t")
fi.columns = [c.replace(' ', '_') for c in fi.columns]
fi.Gene_Type.value_counts().plot(kind="pie",figsize=(6, 6))
plt.axis('equal')
plt.title(pair)
plt.savefig('./Peaks_Gene_Type_pie_%s.png' %pair ,dpi=300) # save figure
plt.show()
#qgrid.show_grid(fi, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#View interactive table
#qgrid.show_grid(peak_stats, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#Summarize annotation stats
annot_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s/regions.annotate.txt" % pair
fi = pandas.read_csv(fname, sep="\t")
fi.columns = [c.replace(' ', '_') for c in fi.columns]
fi['Annotation'] = fi['Annotation'].replace('\(.*?\)','',regex=True)
fi['Annotation'] = fi['Annotation'].replace(' \.*?','',regex=True)
fi['Annotation'] = fi['Annotation'].replace('\..*$','',regex=True)
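    # The three replacements collapse detailed HOMER annotations to their broad category,
    # e.g. an entry like "intron (NM_000546, intron 4 of 10)" reduces to "intron"
    # (the accession here is only an illustrative example).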
fi.Annotation.value_counts().plot(kind="pie", figsize=(8, 8))
plt.axis('equal')
plt.title(pair)
    plt.savefig('./Peaks_Annotation_pie_%s.png' %pair ,dpi=300) # save figure (this plot shows Annotation categories)
plt.show()
#Download scripts from https://github.com/gdavidson/ChIPseq_tools
import sys
sys.path.append('/data/chip/ChIPseq_tools-master') #append path to downloaded scripts
import getFromAnnotations as gfa
for pair in pairs3:
annotationList = gfa.getAnnotationList('%s/regions.annotate.txt' %pair)
#plot distances
try:
#pie chart
pieChartMap = gfa.getPieChartMap(annotationList)
gfa.pieChart(pieChartMap, pair)
        plt.savefig('./Pie_Chart_with_numbers_%s' %pair ,dpi=300) # save figure before displaying it
        plt.show()
except ValueError:
        continue
#qgrid.show_grid(fi.sample(200), grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#Summarize annotation stats
kegg_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s_GO/kegg.txt" % pair
fi = pandas.read_csv(fname, sep="\t")
fi.columns = [c.replace(' ', '_') for c in fi.columns]
fi = fi.loc[fi["Enrichment"] < 0.05]
fi["comparison"] = pair
kegg_stats = kegg_stats.append(fi)
#write summary to file
kegg_stats.to_csv("kegg_stats_summary.csv",index=False)
#View interactive table
qgrid.show_grid(kegg_stats, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#Summarize enriched motifs stats
motif_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s/MotifOutput/knownResults.txt" % pair
fi = pandas.read_csv(fname, sep="\t")
fi.columns = ["Motif_Name", "Consensus", "P-value", "Log_P-value", "q-value_Benjamini", "#TargetSequenceswithMotif",
"%TargetSequenceswithMotif","#BackgroundSequenceswithMotif", "%BackgroundSequenceswithMotif",]
fi = fi.loc[fi["P-value"] < 1e-50]
fi["comparison"] = pair
motif_stats = motif_stats.append(fi)
#write summary to file
motif_stats.to_csv("motif_stats_summary.csv",index=False)
#View interactive table
qgrid.show_grid(motif_stats, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#Summarize peaks in promoters
promoter_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s/regions.annotate.txt" % pair
fi = pandas.read_csv(fname, sep="\t")
fi.columns = [c.replace(' ', '_') for c in fi.columns]
fi['Annotation'] = fi['Annotation'].replace('\(.*?\)','',regex=True)
fi['Annotation'] = fi['Annotation'].replace(' \.*?','',regex=True)
fi['Annotation'] = fi['Annotation'].replace('\..*$','',regex=True)
fi = fi.loc[fi["Annotation"] == "promoter-TSS"]
fi["comparison"] = pair
fi.Gene_Type.value_counts().plot(kind="bar", figsize=(8, 8))
plt.title("Peaks in Promoters by Gene Type -" + pair)
plt.show()
plt.xlabel('Gene Type', fontsize=12)
plt.ylabel('# of Peaks', fontsize=12)
plt.savefig('./Promoter_Peaks_Gene_Type_bar_%s' %pair ,dpi=300) # save figure
promoter_stats = promoter_stats.append(fi)
#write summary to file
promoter_stats.to_csv("promoter_stats_summary.csv",index=False)
genes_df = pandas.read_csv("./genes_of_interest_validated_junctions.csv")
gene_names = genes_df["gene"]
genes_stats = pandas.DataFrame()
for pair in pairs3:
fname = "./%s/regions.annotate.txt" % pair
fi = pandas.read_csv(fname, sep="\t")
fi.columns = [c.replace(' ', '_') for c in fi.columns]
fi['Annotation'] = fi['Annotation'].replace('\(.*?\)','',regex=True)
fi['Annotation'] = fi['Annotation'].replace(' \.*?','',regex=True)
fi['Annotation'] = fi['Annotation'].replace('\..*$','',regex=True)
fi = fi.loc[fi["Gene_Name"].isin(gene_names)]
fi["comparison"] = pair
if fi.Annotation.empty:
        continue
else:
fi.Annotation.value_counts().plot(kind="bar", figsize=(8, 8))
plt.title("Peaks in Promoters by Annotation -" + pair)
plt.show()
plt.xlabel('Annotation', fontsize=12)
plt.ylabel('# of Peaks', fontsize=12)
plt.savefig('./Genes_of_Interest_Peaks_Annotation_bar_%s' %pair ,dpi=300) # save figure
genes_stats = genes_stats.append(fi)
#write summary to file
genes_stats.to_csv("genes_stats_summary.csv",index=False)
#Download scripts from https://github.com/gdavidson/ChIPseq_tools
import sys
sys.path.append('/data/chip/ChIPseq_tools-master') #append path to downloaded scripts
import getFromAnnotations as gfa
for pair in pairs3:
annotationList = gfa.getAnnotationList('%s/regions.annotate.txt' %pair)
#plot distances
try:
distanceList,countMap = gfa.getDistanceList(annotationList)
gfa.histDistances(distanceList, pair)
        plt.savefig('./TSS_distance_%s' %pair ,dpi=300) # save figure before displaying it
        plt.show()
        gfa.plotDistances(countMap)
        plt.savefig('./TSS_distance_bp_%s' %pair ,dpi=300) # save figure before displaying it
        plt.show()
except ValueError:
        continue
genes_stats2 = genes_stats[['Gene_Name','comparison']]
genes_stats2.Gene_Name.value_counts().plot(kind="bar", figsize=(15, 8), stacked=True)
plt.xlabel('Genes of Interest', fontsize=12)
plt.ylabel('# of Peaks', fontsize=12)
plt.savefig('./Genes_of_Interest_Peaks_all.png' ,dpi=300) # save figure
genes_stats2 = genes_stats[['Gene_Name','comparison']]
genes_stats2.comparison.value_counts().plot(kind="barh", figsize=(15, 8), stacked=True)
plt.xlabel('# Peaks', fontsize=12)
plt.ylabel('Comparison', fontsize=12)
plt.savefig('./Genes_of_Interest_Comparison_Peaks_all.png' ,dpi=300) # save figure
sub_df = genes_stats2.groupby(['Gene_Name']).comparison.value_counts().unstack()
sub_df.plot(kind='bar',stacked=True, figsize=(15, 8)).legend(loc='center left', bbox_to_anchor=(1.0, 0.5) )
plt.xlabel('Genes of Interest', fontsize=12)
plt.ylabel('# of Peaks', fontsize=12)
plt.savefig('./Genes_of_Interest_Peaks_by_comparison.png' ,dpi=300) # save figure
sub_df = genes_stats2.groupby(['comparison']).Gene_Name.value_counts().unstack()
sub_df.plot(kind='barh',stacked=True, figsize=(15, 8)).legend(loc='center left', bbox_to_anchor=(1.0, 0.5) )
plt.xlabel('# Peaks', fontsize=12)
plt.ylabel('Comparison', fontsize=12)
plt.savefig('./Comparison_by_genes_of_interest.png' ,dpi=300) # save figure
for pairs in pairs_to_compare:
#print pairs
pairs_split = pairs.split("-")
pair1 = pairs_split[0]
pair2= pairs_split[1]
peaks1 = pandas.read_csv('./%s/regions.annotate.txt' %pair1, sep="\t")
peaks2 = pandas.read_csv('./%s/regions.annotate.txt' %pair2, sep="\t")
peaks1.columns = [c.replace(' ', '_') for c in peaks1.columns]
peaks1.columns.values[0] = "Peak_ID"
peaks2.columns = [c.replace(' ', '_') for c in peaks2.columns]
peaks2.columns.values[0] = "Peak_ID"
peaks1_list = peaks1['Gene_Name'].tolist()
peaks2_list = peaks2['Gene_Name'].tolist()
venn2([set(peaks1_list), set(peaks2_list)], (pair1,pair2))
    plt.savefig('./Venn_Analysis_Genes_with_Peaks_%s.png' %pairs ,dpi=300) # save figure before displaying it
    plt.show()
commonGenes, uniqueL1, uniqueL2 = compare.compareLists(peaks1_list, peaks2_list)
commonGenes_df = pandas.DataFrame(commonGenes, columns = ["commonGenes"])
commonGenes_df.to_csv("CommonGenes_%s.csv" %pairs)
uniqueL1_df = pandas.DataFrame(uniqueL1, columns = ["uniqueL1"])
uniqueL1_df.to_csv("uniqueL1_%s_%s.csv" %(pair1, pairs))
uniqueL2_df = pandas.DataFrame(uniqueL2, columns = ["uniqueL2"])
uniqueL2_df.to_csv("uniqueL2_%s_%s.csv" %(pair2, pairs))
for sample in sample_names:
url = "http://ccbb-analysis.s3.amazonaws.com/%s/%s.ucsc.bedGraph.gz" %(sample,sample)
print url
IFrame("https://genome.ucsc.edu/cgi-bin/hgCustom?hgsid=504023239_5efJ2ONTkgrqUm6AcaAkNGcyXKmn", width=900, height=500)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extract all related track files
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import pickle as pkl
import tarfile

# NOTE: `fmap` (path to the song->track pickle), `data_dir` and `msd_dir` (MSD archive directory)
# are assumed to have been defined in an earlier cell.
song2TrackID = pkl.load(open(fmap, 'rb'))
{ k : song2TrackID[k] for k in sorted(song2TrackID.keys())[:10] }
trackIDs = sorted({trackID for value in song2TrackID.values() for trackID in value})
len(trackIDs)
trackIDs[:10]
%%script false
# TOO slow!
tar = None
flag = None
cnt = 0
for trackID in trackIDs:
cnt += 1
sys.stdout.write("\r%d / %d" % (cnt, len(trackIDs)))
sys.stdout.flush()
ftrack = trackID[2] + '/' + trackID[3] + '/' + trackID[4] + '/' + trackID + '.h5'
# practical solution: generate bash commands
# workflow: extract .tar.gz -> move files-of-interest -> remove all extracted files; repeat this.
#fnew = 'files/' + trackID + '.h5'
#if os.path.exists(os.path.join(data_dir, fnew)): continue
#print('mv', ftrack, fnew)
fnew = os.path.join(data_dir, 'files/' + trackID + '.h5')
ftar = os.path.join(msd_dir, trackID[2] + '.tar.gz')
if os.path.exists(fnew): continue
if flag is None or flag != trackID[2]:
flag = trackID[2]
if tar is not None:
tar.close()
tar = tarfile.open(name=ftar, mode='r:gz')
if tar is None:
tar = tarfile.open(name=ftar, mode='r:gz')
fdr = tar.extractfile(ftrack)
with open(fnew, 'wb') as fdw:
fdw.write(fdr.read())
fdr.close()
# practical solution: generate bash commands
# workflow: extract .tar.gz -> move files-of-interest -> remove all extracted files; repeat this.
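# For a hypothetical track ID 'TRAAAAK128F9318786' the generated script would contain, e.g.:
#   echo "extracting A"
#   tar xzf A.tar.gz
#   mv A/A/A/TRAAAAK128F9318786.h5 files/TRAAAAK128F9318786.h5
#   ...
#   rm -rf A/   (emitted once the first track of the next letter is reached)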
trackIDs = sorted(trackIDs)
cnt = 0
flag = None
fscript = os.path.join(data_dir, 'msd/extract_aotm2011.sh')
with open(fscript, 'w') as fd:
for trackID in trackIDs:
cnt += 1
if cnt % 100 == 0:
sys.stdout.write("\r%d / %d" % (cnt, len(trackIDs))); sys.stdout.flush()
fnew = 'files/' + trackID + '.h5'
if os.path.exists(fnew): continue
ftrack = trackID[2] + '/' + trackID[3] + '/' + trackID[4] + '/' + trackID + '.h5'
if flag is None or flag != trackID[2]:
if flag is not None:
fd.write('rm -rf %s/ \n' % flag)
flag = trackID[2]
fd.write('echo "extracting %s"\n' % flag)
fd.write('tar xzf %s.tar.gz\n' % flag)
        fd.write('mv %s %s\n' % (ftrack, fnew))
    # also remove the directory extracted for the final letter
    fd.write('rm -rf %s/ \n' % flag)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Aod Plus Ccn
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 13.3. External Mixture
Step59: 14. Optical Radiative Properties --> Radiative Scheme
Step60: 14.2. Shortwave Bands
Step61: 14.3. Longwave Bands
Step62: 15. Optical Radiative Properties --> Cloud Interactions
Step63: 15.2. Twomey
Step64: 15.3. Twomey Minimum Ccn
Step65: 15.4. Drizzle
Step66: 15.5. Cloud Lifetime
Step67: 15.6. Longwave Bands
Step68: 16. Model
Step69: 16.2. Processes
Step70: 16.3. Coupling
Step71: 16.4. Gas Phase Precursors
Step72: 16.5. Scheme Type
Step73: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'miroc', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
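# Example entry (hypothetical values; for multi-valued properties each value would get its own call):
# DOC.set_value("troposhere")
# DOC.set_value("stratosphere")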
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod_plus_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.external_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1 - Gradient Descent
Step4: Expected Output
Step6: Expected Output
Step8: Expected Output
Step10: Expected Output
Step12: Expected Output
Step13: Expected Output
Step15: We have already implemented a 3-layer neural network. You will train it with
Step16: You will now run this 3 layer neural network with each of the 3 optimization methods.
Step17: 5.2 - Mini-batch gradient descent with momentum
Step18: 5.3 - Mini-batch with Adam mode
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
    parameters -- python dictionary containing your updated parameters
    """
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
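    # Plain gradient descent: W[l] := W[l] - learning_rate * dW[l]  (and likewise for b[l])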
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] -= learning_rate*grads["dW" + str(l+1)]
parameters["b" + str(l+1)] -= learning_rate*grads["db" + str(l+1)]
### END CODE HERE ###
return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
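    # e.g. with m = 148 and mini_batch_size = 64 this gives 2 complete mini-batches of 64
    # examples, and the "end case" below handles the remaining 148 - 2*64 = 20 examples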
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k*mini_batch_size : (k+1)*mini_batch_size]
mini_batch_Y = shuffled_Y[:, k*mini_batch_size : (k+1)*mini_batch_size]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
v["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
### END CODE HERE ###
return v
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
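    # Velocities and parameters are updated as:
    #   v = beta * v + (1 - beta) * grad
    #   W = W - learning_rate * v      (and likewise for b)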
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l+1)] = beta*v["dW" + str(l+1)] + (1-beta)*grads["dW" + str(l+1)]
v["db" + str(l+1)] = beta*v["db" + str(l+1)] + (1-beta)*grads["db" + str(l+1)]
# update parameters
parameters["W" + str(l+1)] -= learning_rate*v["dW" + str(l+1)]
parameters["b" + str(l+1)] -= learning_rate*v["db" + str(l+1)]
### END CODE HERE ###
return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
    """
    Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
v["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
s["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
s["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
### END CODE HERE ###
return v, s
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
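    # Adam keeps exponentially weighted averages of the gradients (v) and of their
    # element-wise squares (s), corrects both for initialisation bias, then updates:
    #   v = beta1*v + (1-beta1)*grad          v_corrected = v / (1 - beta1**t)
    #   s = beta2*s + (1-beta2)*grad**2       s_corrected = s / (1 - beta2**t)
    #   W = W - learning_rate * v_corrected / sqrt(s_corrected + epsilon)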
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = beta1*v["dW" + str(l+1)] + (1-beta1)*grads["dW" + str(l+1)]
v["db" + str(l+1)] = beta1*v["db" + str(l+1)] + (1-beta1)*grads["db" + str(l+1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)]/(1 - (beta1**t))
v_corrected["db" + str(l+1)] = v["db" + str(l+1)]/(1 - (beta1**t))
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l+1)] = beta2*s["dW" + str(l+1)] + (1-beta2)*np.power(grads["dW" + str(l+1)], 2)
s["db" + str(l+1)] = beta2*s["db" + str(l+1)] + (1-beta2)*np.power(grads["db" + str(l+1)], 2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)]/(1 - (beta2**t))
s_corrected["db" + str(l+1)] = s["db" + str(l+1)]/(1 - (beta2**t))
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] -= learning_rate*v_corrected["dW" + str(l+1)]/np.sqrt(s_corrected["dW" + str(l+1)] + epsilon)
parameters["b" + str(l+1)] -= learning_rate*v_corrected["db" + str(l+1)]/np.sqrt(s_corrected["db" + str(l+1)] + epsilon)
### END CODE HERE ###
return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
train_X, train_Y = load_dataset()
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches. We increment the seed to reshuffle the dataset differently after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost
cost = compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
# Print the cost every 1000 epochs
if print_cost and i % 1000 == 0:
print ("Cost after epoch %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set your bucket
Step2: Verify CSV files exist
Step3: Create Keras model
Step6: Lab Task #2
Step8: Lab Task #3
Step10: Lab Task #4
Step12: Lab Task #5
Step14: Lab Task #6
Step16: Lab Task #7
Step17: We can visualize the DNN using the Keras plot_model utility.
Step18: Run and evaluate model
Step19: Visualize loss curve
Step20: Save the model
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
BUCKET = ""  # REPLACE BY YOUR BUCKET
os.environ['BUCKET'] = BUCKET
TRAIN_DATA_PATH = "gs://{bucket}/babyweight/data/train*.csv".format(bucket=BUCKET)
EVAL_DATA_PATH = "gs://{bucket}/babyweight/data/eval*.csv".format(bucket=BUCKET)
!gsutil ls $TRAIN_DATA_PATH
!gsutil ls $EVAL_DATA_PATH
# Determine CSV, label, and key columns
# TODO: Create list of string column headers, make sure order matches.
CSV_COLUMNS = [""]
# TODO: Add string name for label column
LABEL_COLUMN = ""
# Set default values for each CSV column as a list of lists.
# Treat is_male and plurality as strings.
DEFAULTS = []
def features_and_labels(row_data):
Splits features and labels from feature dictionary.
Args:
row_data: Dictionary of CSV column names and tensor values.
Returns:
Dictionary of feature tensors and label tensor.
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
Loads dataset using the tf.data API from CSV files.
Args:
pattern: str, file pattern to glob into list of files.
batch_size: int, the number of examples per batch.
mode: tf.estimator.ModeKeys to determine if training or evaluating.
Returns:
`Dataset` object.
# TODO: Make a CSV dataset
dataset = tf.data.experimental.make_csv_dataset()
# TODO: Map dataset to features and label
dataset = dataset.map() # features, label
# Shuffle and repeat for training
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=1000).repeat()
# Take advantage of multi-threading; 1=AUTOTUNE
dataset = dataset.prefetch(buffer_size=1)
return dataset
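# One possible way to fill in the two TODOs above (an illustrative sketch, not the graded solution):
#   dataset = tf.data.experimental.make_csv_dataset(
#       file_pattern=pattern, batch_size=batch_size,
#       column_names=CSV_COLUMNS, column_defaults=DEFAULTS)
#   dataset = dataset.map(map_func=features_and_labels)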
def create_input_layers():
Creates dictionary of input layers for each feature.
Returns:
Dictionary of `tf.Keras.layers.Input` layers for each feature.
# TODO: Create dictionary of tf.keras.layers.Input for each raw feature
inputs = {}
return inputs
def create_feature_columns():
Creates dictionary of feature columns from inputs.
Returns:
Dictionary of feature columns.
# TODO: Create feature columns for numeric features
feature_columns = {}
# TODO: Add feature columns for categorical features
return feature_columns
def get_model_outputs(inputs):
Creates model architecture and returns outputs.
Args:
inputs: Dense tensor used as inputs to model.
Returns:
Dense tensor output from the model.
# TODO: Create two hidden layers of [64, 32] just like in the BQML DNN
# TODO: Create final output layer
return output
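# A possible completion of the TODOs above (sketch only; the layer names are illustrative):
#   h1 = tf.keras.layers.Dense(units=64, activation="relu", name="h1")(inputs)
#   h2 = tf.keras.layers.Dense(units=32, activation="relu", name="h2")(h1)
#   output = tf.keras.layers.Dense(units=1, activation="linear", name="weight")(h2)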
def rmse(y_true, y_pred):
Calculates RMSE evaluation metric.
Args:
y_true: tensor, true labels.
y_pred: tensor, predicted labels.
Returns:
Tensor with value of RMSE between true and predicted labels.
# TODO: Calculate RMSE from true and predicted labels
pass
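# A possible implementation (sketch): return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))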
# Build a simple Keras DNN using its Functional API
def build_dnn_model():
Builds simple DNN using Keras Functional API.
Returns:
`tf.keras.models.Model` object.
# Create input layer
inputs = create_input_layers()
# Create feature columns
feature_columns = create_feature_columns()
# The constructor for DenseFeatures takes a list of numeric columns
# The Functional API in Keras requires: LayerConstructor()(inputs)
dnn_inputs = tf.keras.layers.DenseFeatures(
feature_columns=feature_columns.values())(inputs)
# Get output of model given inputs
output = get_model_outputs(dnn_inputs)
# Build model and compile it all together
model = tf.keras.models.Model(inputs=inputs, outputs=output)
# TODO: Add custom eval metrics to list
model.compile(optimizer="adam", loss="mse", metrics=["mse"])
return model
print("Here is our DNN architecture so far:\n")
model = build_dnn_model()
print(model.summary())
tf.keras.utils.plot_model(
model=model, to_file="dnn_model.png", show_shapes=False, rankdir="LR")
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around
NUM_EVALS = 5 # how many times to evaluate
# Enough to get a reasonable sample, but not so much that it slows down
NUM_EVAL_EXAMPLES = 10000
# TODO: Load training dataset
trainds = load_dataset()
# TODO: Load evaluation dataset
evalds = load_dataset().take(count=NUM_EVAL_EXAMPLES // 1000)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
logdir = os.path.join(
"logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=logdir, histogram_freq=1)
# TODO: Fit model on training dataset and evaluate every so often
history = model.fit()
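# A possible call for the TODO above (sketch; the arguments reuse the variables defined earlier):
#   history = model.fit(trainds,
#                       validation_data=evalds,
#                       epochs=NUM_EVALS,
#                       steps_per_epoch=steps_per_epoch,
#                       callbacks=[tensorboard_callback])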
# Plot
import matplotlib.pyplot as plt
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(["loss", "rmse"]):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history["val_{}".format(key)])
plt.title("model {}".format(key))
plt.ylabel(key)
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left");
OUTPUT_DIR = "babyweight_trained"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(
OUTPUT_DIR, datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
tf.saved_model.save(
obj=model, export_dir=EXPORT_PATH) # with default serving function
print("Exported trained model to {}".format(EXPORT_PATH))
!ls $EXPORT_PATH
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creates the constraints, the likelihood function, as well as a function to create a random Q-matrix.
Step2: Performs the minimization
|
<ASSISTANT_TASK:>
Python Code:
from dcprogs import read_idealized_bursts
from dcprogs.likelihood import QMatrix
name = "CH82.scn"
tau = 1e-4
tcrit = 4e-3
graph = [["V", "V", "V", 0, 0],
["V", "V", 0, "V", 0],
["V", 0, "V", "V", "V"],
[ 0, "V", "V", "V", 0],
[ 0, 0, "V", 0, "V"]]
nopen = 2
qmatrix = QMatrix([[ -3050, 50, 3000, 0, 0 ],
[ 2./3., -1502./3., 0, 500, 0 ],
[ 15, 0, -2065, 50, 2000 ],
[ 0, 15000, 4000, -19000, 0 ],
[ 0, 0, 10, 0, -10 ] ], 2)
bursts = read_idealized_bursts(name, tau=tau, tcrit=tcrit)
from scipy.optimize import minimize
from numpy import NaN, zeros, arange
import numpy as np
from dcprogs.likelihood.random import qmatrix as random_qmatrix
from dcprogs.likelihood import QMatrix, Log10Likelihood
from dcprogs.likelihood.optimization import reduce_likelihood
likelihood = Log10Likelihood(bursts, nopen, tau, tcrit)
reduced = reduce_likelihood(likelihood, graph)
x = reduced.to_reduced_coords( random_qmatrix(5).matrix )
constraints = []
def create_inequality_constraints(i, value=0e0, sign=1e0):
f = lambda x: sign * (x[i] - value)
def df(x):
a = zeros(x.shape)
a[i] = sign
return a
return f, df
for i in range(len(x)):
f, df = create_inequality_constraints(i)
constraints.append({'type': 'ineq', 'fun': f, 'jac': df})
f, df = create_inequality_constraints(i, 1e4, -1)
constraints.append({'type': 'ineq', 'fun': f, 'jac': df})
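# For scipy.optimize.minimize, an 'ineq' constraint means fun(x) >= 0, so each pair of
# dictionaries above bounds one reduced rate constant: sign=+1 gives x[i] >= 0, and
# sign=-1 with value=1e4 gives 1e4 - x[i] >= 0, i.e. 0 <= x[i] <= 1e4.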
def random_starting_point():
from numpy import infty, NaN
from dcprogs.likelihood.random import rate_matrix as random_rate_matrix
for i in range(100):
matrix = random_rate_matrix(N=len(qmatrix.matrix), zeroprob=0)
x = reduced.to_reduced_coords( matrix )
try:
result = reduced(x)
print(result, reduced.to_full_coords(x))
except:
pass
else:
if result != NaN and result != infty and result != -infty: break
else: raise RuntimeError("Could not create random matrix")
return x
def does_not_throw(x):
try: return -reduced(x)
except: return NaN
import math
methods = ['COBYLA', 'SLSQP']
x = random_starting_point()
print ('x=', x)
maxx = (x.copy(), reduced(x))
for i in range(len(methods)):
result = minimize(does_not_throw,
x,
method=methods[i],
constraints=constraints,
options={'maxiter': 1000, 'disp':True})
print(result)
if not math.isnan(result.fun):
if result.fun < maxx[1]: maxx = (x.copy(), result.fun)
if result.success and i > 4: break
x += random_starting_point() * 1e-2
if np.all(np.isnan(x)): x = random_starting_point()
print(maxx[0])
print(maxx[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: gaussian mixture model
Step2: Gaussian Mixture model Fitting (minimization)
Step3: Expectation Maximization (EM)
Step4: How EM and minimization compare in terms of parameter estimation
Step5: Performance is quite similar in this example, but this may not always be the case!
Step6: What about automatic inference of k ?
Step7: The proposed/optimal k in terms of the Akaike criterion (AIC) is k=4, which is larger than the true number of components (k=2)
Step8: Now the question is how reliable is this estimation of k on 100 experiments ?
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
matplotlib.rcParams['figure.figsize'] = (12,6)
matplotlib.rcParams['figure.dpi'] = 120
matplotlib.style.use('ggplot')
from biokit.stats import mixture
m = mixture.GaussianMixture(mu=[-2, 1], sigma=[0.5,0.5],
mixture=[.2,.8], N=60)
# data is stored in m.data and a histogram can easily be shown:
m.hist()
# In order to figure out the underlying parameters of the distribution
# given in the above sample, we will first use the GaussianMixtureFitting
# class, which fits a mixture-of-Gaussians model to the data by minimization.
# A convenient class for representing such a model is the GaussianMixtureModel
# class, which can be used to plot the fitted distribution:
gmm = mixture.GaussianMixtureModel(k=2)
X = linspace(-3,3,100)
plot(X, gmm.pdf(X, params=[-1,0.5,0.2,1,0.5,0.8]), 'r')
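# The params vector appears to be ordered component-by-component as
# [mu_1, sigma_1, pi_1, mu_2, sigma_2, pi_2]: here two Gaussians N(-1, 0.5) and N(1, 0.5)
# with mixture weights 0.2 and 0.8 (the `guess` arguments used further below follow the same layout).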
# now, let us use the following class to figure out the parameters of the
# gaussian models. We have to provide the number of models defined by
# the k=2 parameter
mf = mixture.GaussianMixtureFitting(m.data, k=2)
#
mf.estimate()
mf.plot()
em = mixture.EM(m.data, model=gmm)
em.estimate()
em.plot()
em.results.mus
# to compare to numerical values found with the GaussianMixtureFitting
mf.results.mus
res_mf = []; res_em = []
N = 50
from easydev import Progress
replicates = 100
p = Progress(replicates)
mus = [-2,1]
sigmas = [0.5,0.5]
pis= [.2,0.8]
for i in range(0, replicates):
m = mixture.GaussianMixture(mu=mus, sigma=sigmas, mixture=pis, N=N)
mf = mixture.GaussianMixtureFitting(m.data)
mf.estimate(guess=[-.8,0.5,0.5,.8,0.5,0.5])
res_mf.append(mf.results)
em = mixture.EM(m.data, model=gmm)
em.estimate(guess=[-.8, 0.5, 0.5, .8, 0.5, 0.5])
res_em.append(em.results)
#p.animate(i+1)
subplot(2,3,1)
hist([x.mus[0] for x in res_mf], 20, alpha=0.4);
hist([x.mus[0] for x in res_em], 20, color='r', alpha=0.2)
axvline(mus[0], color='r', lw=2)
subplot(2,3,2)
hist([x.sigmas[0] for x in res_mf], 20, alpha=0.4);
hist([x.sigmas[0] for x in res_em], 20, color='r', alpha=0.2)
axvline(sigmas[0], color='r', lw=2)
subplot(2,3,3)
hist([x.pis[0] for x in res_mf], 20, alpha=0.4);
hist([x.pis[0] for x in res_em], 20, color='r', alpha=0.2)
axvline(pis[0], color='r', lw=2)
subplot(2,3,4)
hist([x.mus[1] for x in res_mf], 20, alpha=0.4);
hist([x.mus[1] for x in res_em], 20, color='r', alpha=0.2)
axvline(mus[1], color='r', lw=2)
subplot(2,3,5)
hist([x.sigmas[1] for x in res_mf], 20, alpha=0.4);
hist([x.sigmas[1] for x in res_em], 20, color='r', alpha=0.2)
axvline(sigmas[1], color='r', lw=2)
subplot(2,3,6)
hist([x.pis[1] for x in res_mf], 20, alpha=0.4);
hist([x.pis[1] for x in res_em], 20, color='r', alpha=0.2)
axvline(pis[1], color='r', lw=2)
m = mixture.GaussianMixture(mu=[-1, 1], sigma=[0.5,0.5],
mixture=[.2,.8], N=60)
# if we know that k=2, it is easy to estimate the parameters
res = mf.estimate(k=2)
mf.plot()
amf = mixture.AdaptativeMixtureFitting(m.data)
amf.diagnostic(kmin=1, kmax=8)
amf.fitting = mixture.EM(m.data)
amf.run(kmin=1, kmax=8)
amf.diagnostic(k=2)
plot(amf.x[0:8], [amf.all_results[i]['AIC'] for i in amf.x[0:8]], 'or-')
plot(amf.x[0:8], [amf.all_results[i]['AICc'] for i in amf.x[0:8]], 'og-')
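# Reminder: AIC = 2*p - 2*ln(L_max), where p is the number of free parameters of the mixture
# and L_max its maximised likelihood; AICc adds a correction for small sample sizes.
# Lower values indicate a better trade-off between fit quality and model complexity.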
bestk_em = []
aics_em = []
bestk_m = []
aics_m = []
replicates = 100
p = Progress(replicates)
for i in range(0, replicates):
m = mixture.GaussianMixture(mu=[-1, 1], sigma=[0.5,0.5],
mixture=[.2,.8], N=1000)
amf = mixture.AdaptativeMixtureFitting(m.data)
amf.verbose = False
amf.fitting = mixture.EM(m.data)
amf.run()
bestk_em.append(amf.best_k)
aics_em.append(amf.fitting.results.AIC)
amf = mixture.AdaptativeMixtureFitting(m.data)
amf.verbose = False
amf.run()
bestk_m.append(amf.best_k)
aics_m.append(amf.fitting.results.AIC)
#p.animate(i+1)
res = hist([bestk_em, bestk_m])
legend(['EM', 'minimization'])
hist([aics_em, aics_m])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/Mean_Variance_Image.png">
Step6: Checkpoint
Step7: Problem 2
Step8: <img src="image/Learn_Rate_Tune_Image.png">
Step9: Test
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
Download file from <url>
:param url: URL to file
:param file: Local file path
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
xmin = np.min(image_data.reshape(-1,1))
xmax = np.max(image_data.reshape(-1,1))
a = 0.1; b = 0.9;
x = a + (image_data - xmin)*(b-a)/(xmax-xmin)
return x
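# The scaling above applies X' = a + (X - X_min) * (b - a) / (X_max - X_min) with [a, b] = [0.1, 0.9],
# mapping pixel values from [0, 255] into the range [0.1, 0.9].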
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
# features =
# labels
features = tf.placeholder(tf.float32,(None,features_count))
labels = tf.placeholder(tf.float32,(None,labels_count))
# TODO: Set the weights and biases tensors
# weights =
# biases =
dd = tf.truncated_normal((features_count,labels_count))
weights = tf.Variable(dd,dtype=tf.float32)
biases = tf.Variable(tf.zeros((labels_count)))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
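# For reference: prediction_j = exp(logit_j) / sum_k exp(logit_k), the per-example cross entropy
# is -sum_j label_j * log(prediction_j), and the training loss is its mean over the batch.
# (tf.nn.softmax_cross_entropy_with_logits computes the same quantity in a numerically safer way.)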
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
# epochs =
# learning_rate =
epochs = 10
learning_rate = 0.01
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
valid_acc_batch
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_,l = session.run([optimizer,loss], feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
test_accuracy
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data and set Hyperparameters
Step2: Load and format the data
Step3: We also convert the data into the flattened format. This converts the data into a 2D array, which allows the data to be passed into many ML packages that require a two-dimensional dataset. In particular, this is the format accepted by the Diffusion Atlas object. Trajectory start/stop points are then stored in the traj_edges array.
Step4: Construct DGA MFPT by increasing lag times
Step5: Construct DGA MFPT with increasing Delay Embedding
Step6: Plot the Results
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pyedgar
from pyedgar.data_manipulation import tlist_to_flat, flat_to_tlist, delay_embed, lift_function
%matplotlib inline
ntraj = 700
trajectory_length = 40
lag_values = np.arange(1, 37, 2)
embedding_values = lag_values[1:] - 1
trajs_2d = np.load('data/muller_brown_trajs.npy')[:ntraj, :trajectory_length] # Raw trajectory
trajs = trajs_2d[:, :, 1] # Only keep y coordinate
stateA = (trajs > 1.15).astype('float')
stateB = (trajs < 0.15).astype('float')
# Convert to list of trajectories format
trajs = [traj_i.reshape(-1, 1) for traj_i in trajs]
stateA = [A_i for A_i in stateA]
stateB = [B_i for B_i in stateB]
# Load the true results
true_mfpt = np.load('data/htAB_1_0_0_1.npy')
flattened_trajs, traj_edges = tlist_to_flat(trajs)
flattened_stateA = np.hstack(stateA)
flattened_stateB = np.hstack(stateB)
print("Flattened Shapes are: ", flattened_trajs.shape, flattened_stateA.shape, flattened_stateB.shape,)
# Build the basis set
diff_atlas = pyedgar.basis.DiffusionAtlas.from_sklearn(alpha=0, k=500, bandwidth_type='-1/d', epsilon='bgh_generous')
diff_atlas.fit(flattened_trajs)
flat_basis = diff_atlas.make_dirichlet_basis(200, in_domain=(1. - flattened_stateA))
basis = flat_to_tlist(flat_basis, traj_edges)
flat_basis_no_boundaries = diff_atlas.make_dirichlet_basis(200)
basis_no_boundaries = flat_to_tlist(flat_basis_no_boundaries, traj_edges)
# Perform DGA calculation
mfpt_BA_lags = []
for lag in lag_values:
mfpt = pyedgar.galerkin.compute_mfpt(basis, stateA, lag=lag)
pi = pyedgar.galerkin.compute_change_of_measure(basis_no_boundaries, lag=lag)
flat_pi = np.array(pi).ravel()
flat_mfpt = np.array(mfpt).ravel()
mfpt_BA = np.mean(flat_mfpt * flat_pi * np.array(stateB).ravel()) / np.mean(flat_pi * np.array(stateB).ravel())
mfpt_BA_lags.append(mfpt_BA)
mfpt_BA_embeddings = []
for lag in embedding_values:
# Perform delay embedding
debbed_traj = delay_embed(trajs, n_embed=lag)
lifted_A = lift_function(stateA, n_embed=lag)
lifted_B = lift_function(stateB, n_embed=lag)
flat_debbed_traj, embed_edges = tlist_to_flat(debbed_traj)
flat_lifted_A = np.hstack(lifted_A)
# Build the basis
diff_atlas = pyedgar.basis.DiffusionAtlas.from_sklearn(alpha=0, k=500, bandwidth_type='-1/d',
epsilon='bgh_generous', neighbor_params={'algorithm':'brute'})
diff_atlas.fit(flat_debbed_traj)
flat_deb_basis = diff_atlas.make_dirichlet_basis(200, in_domain=(1. - flat_lifted_A))
deb_basis = flat_to_tlist(flat_deb_basis, embed_edges)
flat_pi_basis = diff_atlas.make_dirichlet_basis(200)
pi_basis = flat_to_tlist(flat_pi_basis, embed_edges)  # use the unrestricted basis for the change of measure
# Construct the Estimate
deb_mfpt = pyedgar.galerkin.compute_mfpt(deb_basis, lifted_A, lag=1)
pi = pyedgar.galerkin.compute_change_of_measure(pi_basis)
flat_pi = np.array(pi).ravel()
flat_mfpt = np.array(deb_mfpt).ravel()
deb_mfpt_BA = np.mean(flat_mfpt * flat_pi * np.array(lifted_B).ravel()) / np.mean(flat_pi * np.array(lifted_B).ravel())
mfpt_BA_embeddings.append(deb_mfpt_BA)
plt.plot(embedding_values, mfpt_BA_embeddings, label="Delay Embedding")
plt.plot(lag_values, mfpt_BA_lags, label="Lags")
plt.axhline(true_mfpt[0] * 10, color='k', label='True')
plt.axhline((true_mfpt[0] + true_mfpt[1]) * 10., color='k', linestyle=':')
plt.axhline((true_mfpt[0] - true_mfpt[1]) * 10., color='k', linestyle=':')
plt.legend()
plt.ylim(0, 100)
plt.xlabel("Lag / Delay Length")
plt.ylabel("Estimated MFPT")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: Neural network classes for testing
Step9: There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.
Step10: Comparisons between identical networks, with and without batch normalization
Step11: As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.
Step12: As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations!
Step13: With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches.
Step14: Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.
Step15: In both of the previous examples, the network with batch normalization manages to get over 98% accuracy, and gets near that result almost immediately. The higher learning rate allows the network to train extremely fast.
Step16: In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.
Step17: As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.
Step18: With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.
Step19: Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.
Step20: In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would not want to do this. But these examples demonstrate how batch normalization makes your network much more resilient.
Step21: As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them.
Step22: Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all.
Step23: The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.
Step24: Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time to train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.
Step25: We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.
Step26: In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.
Step27: When we used these same parameters earlier, we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.)
Step29: When we trained with these parameters and batch normalization earlier, we reached 90% validation accuracy. However, this time the network almost starts to make some progress in the beginning, but it quickly breaks down and stops learning.
Step31: This version of fully_connected is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points
Step32: In the following cell, we pass True for test_training_accuracy, which performs the same batch normalization that we normally perform during training.
Step33: As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance of that batch. The "batches" we are using for these predictions have a single input each time, so their values are the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer.
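Concretely: with a single-example batch, batch_mean equals the input activation itself and batch_variance is 0, so (x - batch_mean) / sqrt(batch_variance + epsilon) evaluates to 0 at every node; after the scale-and-shift step each layer simply outputs its learned beta values, which is why every prediction comes out the same.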
|
<ASSISTANT_TASK:>
Python Code:
# Import necessary packages
import tensorflow as tf
import tqdm
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Import MNIST data so we have something for our experiments
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
class NeuralNet:
def __init__(self, initial_weights, activation_fn, use_batch_norm):
Initializes this object, creating a TensorFlow graph using the given parameters.
:param initial_weights: list of NumPy arrays or Tensors
Initial values for the weights for every layer in the network. We pass these in
so we can create multiple networks with the same starting weights to eliminate
training differences caused by random initialization differences.
The number of items in the list defines the number of layers in the network,
and the shapes of the items in the list define the number of nodes in each layer.
e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would
create a network with 784 inputs going into a hidden layer with 256 nodes,
followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activation function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param use_batch_norm: bool
Pass True to create a network that uses batch normalization; False otherwise
Note: this network will not use batch normalization on layers that do not have an
activation function.
# Keep track of whether or not this network uses batch normalization.
self.use_batch_norm = use_batch_norm
self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm"
# Batch normalization needs to do different calculations during training and inference,
# so we use this placeholder to tell the graph which behavior to use.
self.is_training = tf.placeholder(tf.bool, name="is_training")
# This list is just for keeping track of data we want to plot later.
# It doesn't actually have anything to do with neural nets or batch normalization.
self.training_accuracies = []
# Create the network graph, but it will not actually have any real values until after you
# call train or test
self.build_network(initial_weights, activation_fn)
def build_network(self, initial_weights, activation_fn):
Build the graph. The graph still needs to be trained via the `train` method.
:param initial_weights: list of NumPy arrays or Tensors
See __init__ for description.
:param activation_fn: Callable
See __init__ for description.
self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])
layer_in = self.input_layer
for weights in initial_weights[:-1]:
layer_in = self.fully_connected(layer_in, weights, activation_fn)
self.output_layer = self.fully_connected(layer_in, initial_weights[-1])
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
# Since this class supports both options, only use batch normalization when
# requested. However, do not use it on the final layer, which we identify
# by its lack of an activation function.
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
# (See later in the notebook for more details.)
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
# Apply batch normalization to the linear combination of the inputs and weights
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
# Now apply the activation function, *after* the normalization.
return activation_fn(batch_normalized_output)
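# (For reference, tf.layers.batch_normalization computes gamma * (x - mean) / sqrt(variance + epsilon) + beta,
# using the current batch's statistics when training=True and its tracked population statistics otherwise.)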
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):
Trains the model on the MNIST training dataset.
:param session: Session
Used to run training graph operations.
:param learning_rate: float
Learning rate used during gradient descent.
:param training_batches: int
Number of batches to train.
:param batches_per_sample: int
How many batches to train before sampling the validation accuracy.
:param save_model_as: string or None (default None)
Name to use if you want to save the trained model.
# This placeholder will store the target labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define loss and optimizer
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if self.use_batch_norm:
# If we don't include the update ops as dependencies on the train step, the
# tf.layers.batch_normalization layers won't update their population statistics,
# which will cause the model to fail at inference time
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
# Train for the appropriate number of batches. (tqdm is only for a nice timing display)
for i in tqdm.tqdm(range(training_batches)):
# We use batches of 60 just because the original paper did. You can use any size batch you like.
batch_xs, batch_ys = mnist.train.next_batch(60)
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
# Periodically test accuracy against the 5k validation images and store it for plotting later.
if i % batches_per_sample == 0:
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
self.training_accuracies.append(test_accuracy)
# After training, report accuracy against test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))
# If you want to use this model later for inference instead of having to retrain it,
# just construct it with the same parameters and then pass this file to the 'test' function
if save_model_as:
tf.train.Saver().save(session, save_model_as)
def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):
Tests a trained model on the MNIST testing dataset.
:param session: Session
Used to run the testing graph operations.
:param test_training_accuracy: bool (default False)
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
Note: in real life, *always* perform inference using the population mean and variance.
This parameter exists just to support demonstrating what happens if you don't.
:param include_individual_predictions: bool (default False)
This function always performs an accuracy test against the entire test set. But if this parameter
is True, it performs an extra test, doing 200 predictions one at a time, and displays the results
and accuracy.
:param restore_from: string or None (default None)
Name of a saved model if you want to test with previously saved weights.
# This placeholder will store the true labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# If provided, restore from a previously saved model
if restore_from:
tf.train.Saver().restore(session, restore_from)
# Test against all of the MNIST test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,
labels: mnist.test.labels,
self.is_training: test_training_accuracy})
print('-'*75)
print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))
# If requested, perform tests predicting individual values rather than batches
if include_individual_predictions:
predictions = []
correct = 0
# Do 200 predictions, 1 at a time
for i in range(200):
# This is a normal prediction using an individual test case. However, notice
# we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.
# Remember that will tell it whether it should use the batch mean & variance or
# the population estimates that were calculated while training the model.
pred, corr = session.run([tf.arg_max(self.output_layer,1), accuracy],
feed_dict={self.input_layer: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
self.is_training: test_training_accuracy})
correct += corr
predictions.append(pred[0])
print("200 Predictions:", predictions)
print("Accuracy on 200 samples:", correct/200)
def plot_training_accuracies(*args, **kwargs):
Displays a plot of the accuracies calculated during training to demonstrate
how many iterations it took for the model(s) to converge.
:param args: One or more NeuralNet objects
You can supply any number of NeuralNet objects as unnamed arguments
and this will display their training accuracies. Be sure to call `train` on
the NeuralNets before calling this function.
:param kwargs:
You can supply any named parameters here, but `batches_per_sample` is the only
one we look for. It should match the `batches_per_sample` value you passed
to the `train` function.
fig, ax = plt.subplots()
batches_per_sample = kwargs['batches_per_sample']
for nn in args:
ax.plot(range(0,len(nn.training_accuracies)*batches_per_sample,batches_per_sample),
nn.training_accuracies, label=nn.name)
ax.set_xlabel('Training steps')
ax.set_ylabel('Accuracy')
ax.set_title('Validation Accuracy During Training')
ax.legend(loc=4)
ax.set_ylim([0,1])
plt.yticks(np.arange(0, 1.1, 0.1))
plt.grid(True)
plt.show()
def train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):
Creates two networks, one with and one without batch normalization, then trains them
with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.
:param use_bad_weights: bool
If True, initialize the weights of both networks to wildly inappropriate weights;
if False, use reasonable starting weights.
:param learning_rate: float
Learning rate used during gradient descent.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activation function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param training_batches: (default 50000)
Number of batches to train.
:param batches_per_sample: (default 500)
How many batches to train before sampling the validation accuracy.
# Use identical starting weights for each network to eliminate differences in
# weight initialization as a cause for differences seen in training performance
#
# Note: The networks will use these weights to define the number of and shapes of
# its layers. The original batch normalization paper used 3 hidden layers
# with 100 nodes in each, followed by a 10 node output layer. These values
# build such a network, but feel free to experiment with different choices.
# However, the input size should always be 784 and the final output should be 10.
if use_bad_weights:
# These weights should be horrible because they have such a large standard deviation
weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,10), scale=5.0).astype(np.float32)
]
else:
# These weights should be good because they have such a small standard deviation
weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
]
# Just to make sure the TensorFlow's default graph is empty before we start another
# test, because we don't bother using different graphs or scoping and naming
# elements carefully in this sample code.
tf.reset_default_graph()
# build two versions of same network, 1 without and 1 with batch normalization
nn = NeuralNet(weights, activation_fn, False)
bn = NeuralNet(weights, activation_fn, True)
# train and test the two models
with tf.Session() as sess:
tf.global_variables_initializer().run()
nn.train(sess, learning_rate, training_batches, batches_per_sample)
bn.train(sess, learning_rate, training_batches, batches_per_sample)
nn.test(sess)
bn.test(sess)
# Display a graph of how validation accuracies changed during training
# so we can compare how the models trained and when they converged
plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)
train_and_test(False, 0.01, tf.nn.relu)
train_and_test(False, 0.01, tf.nn.relu, 2000, 50)
train_and_test(False, 0.01, tf.nn.sigmoid)
train_and_test(False, 1, tf.nn.relu)
train_and_test(False, 1, tf.nn.relu)
train_and_test(False, 1, tf.nn.sigmoid)
train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)
train_and_test(False, 2, tf.nn.relu)
train_and_test(False, 2, tf.nn.sigmoid)
train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)
train_and_test(True, 0.01, tf.nn.relu)
train_and_test(True, 0.01, tf.nn.sigmoid)
train_and_test(True, 1, tf.nn.relu)
train_and_test(True, 1, tf.nn.sigmoid)
train_and_test(True, 2, tf.nn.relu)
train_and_test(True, 2, tf.nn.sigmoid)
train_and_test(True, 1, tf.nn.relu)
train_and_test(True, 2, tf.nn.relu)
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
num_out_nodes = initial_weights.shape[-1]
# Batch normalization adds additional trainable variables:
# gamma (for scaling) and beta (for shifting).
gamma = tf.Variable(tf.ones([num_out_nodes]))
beta = tf.Variable(tf.zeros([num_out_nodes]))
# These variables will store the mean and variance for this layer over the entire training set,
# which we assume represents the general population distribution.
# By setting `trainable=False`, we tell TensorFlow not to modify these variables during
# back propagation. Instead, we will assign values to these variables ourselves.
pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)
pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)
# Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.
# This is the default value TensorFlow uses.
epsilon = 1e-3
def batch_norm_training():
# Calculate the mean and variance for the data coming out of this layer's linear-combination step.
# The [0] defines an array of axes to calculate over.
batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
# Calculate a moving average of the training data's mean and variance while training.
# These will be used during inference.
# Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter
# "momentum" to accomplish this and defaults it to 0.99
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
# The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean'
# and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.
# This is necessary because those two operations are not actually in the graph
# connecting the linear_output and batch_normalization layers,
# so TensorFlow would otherwise just skip them.
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
# During inference, use our estimated population mean and variance to normalize the layer
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
# Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute
# the operation returned from `batch_norm_training`; otherwise it will execute the graph
# operation returned from `batch_norm_inference`.
batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)
# Pass the batch-normalized layer output through the activation function.
# The literature states there may be cases where you want to perform the batch normalization *after*
# the activation function, but it is difficult to find any uses of that in practice.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
def batch_norm_test(test_training_accuracy):
:param test_training_accuracy: bool
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
]
tf.reset_default_graph()
# Train the model
bn = NeuralNet(weights, tf.nn.relu, True)
# First train the network
with tf.Session() as sess:
tf.global_variables_initializer().run()
bn.train(sess, 0.01, 2000, 2000)
bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)
batch_norm_test(True)
batch_norm_test(False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Data
Step2: Check the head of customers, and check out its info() and describe() methods.
Step3: Exploratory Data Analysis
Step4: Do the same but with the Time on App column instead.
Step5: Use jointplot to create a 2D hex bin plot comparing Time on App and Length of Membership.
Step6: Let's explore these types of relationships across the entire data set. Use pairplot to recreate the plot below. (Don't worry about the colors)
Step7: Based off this plot what looks to be the most correlated feature with Yearly Amount Spent?
Step8: Create a linear model plot (using seaborn's lmplot) of Yearly Amount Spent vs. Length of Membership.
Step9: Training and Testing Data
Step10: Use model_selection.train_test_split from sklearn to split the data into training and testing sets. Set test_size=0.3 and random_state=101
Step11: Training the Model
Step12: Create an instance of a LinearRegression() model named lm.
Step13: Train/fit lm on the training data.
Step14: Print out the coefficients of the model
Step15: Predicting Test Data
Step16: Create a scatterplot of the real test values versus the predicted values.
Step17: Evaluating the Model
Step18: Residuals
Step19: Conclusion
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
customers = pd.read_csv("Ecommerce Customers")
customers.head()
customers.describe()
customers.info()
sns.set_palette("GnBu_d")
sns.set_style('whitegrid')
# More time on site, more money spent.
sns.jointplot(x='Time on Website',y='Yearly Amount Spent',data=customers)
sns.jointplot(x='Time on App',y='Yearly Amount Spent',data=customers)
sns.jointplot(x='Time on App',y='Length of Membership',kind='hex',data=customers)
sns.pairplot(customers)
# Length of Membership
sns.lmplot(x='Length of Membership',y='Yearly Amount Spent',data=customers)
y = customers['Yearly Amount Spent']
X = customers[['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']]
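# Features are the four usage/membership columns; the target is yearly spending.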
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train,y_train)
# The coefficients
print('Coefficients: \n', lm.coef_)
predictions = lm.predict( X_test)
plt.scatter(y_test,predictions)
plt.xlabel('Y Test')
plt.ylabel('Predicted Y')
# calculate these metrics by hand!
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
sns.distplot((y_test-predictions),bins=50);
coefficients = pd.DataFrame(lm.coef_,X.columns)
coefficients.columns = ['Coefficient']
coefficients
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, select N observations from this data by sampling with replacement and run the regression analysis again. In theory there are $2^{100}$ possible resamples, but we only repeat the procedure 1,000 times; N is chosen arbitrarily.
Step2: The full set of estimated weights, shown as histograms, looks like the following.
Step3: The means and variances are as follows.
Step4: For the constant term, the mean is -1.6, but since the standard deviation is $\sqrt{4.81}=2.19$, the possibility that it is actually 0 cannot be ruled out.
Step5: The std err column of the report shows a standard error of 2.163, and the confidence interval at the end is -5.920 to 2.663, similar to the result obtained by bootstrapping. These values are computed under the probabilistic assumptions explained next.
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import make_regression
X0, y, coef = make_regression(n_samples=100, n_features=1, noise=20, coef=True, random_state=0)
dfX0 = pd.DataFrame(X0, columns=["X1"])
dfX = sm.add_constant(dfX0)
dfy = pd.DataFrame(y, columns=["y"])
model = sm.OLS(dfy, dfX)
result = model.fit()
print(result.params)
N = 1000
params_c = np.zeros(N)
params_x1 = np.zeros(N)
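# Bootstrap loop: draw len(dfy) row indices with replacement, refit OLS on each
# resample, and record the estimated constant and slope coefficients.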
for i in range(N):
idx = np.random.choice(len(dfy), len(dfy), replace=True)  # replace=True: sample with replacement (duplicates allowed)
dfX2 = dfX.ix[idx, :]
dfy2 = dfy.ix[idx]
r = sm.OLS(dfy2, dfX2).fit()
params_c[i] = r.params.const
params_x1[i] = r.params.X1
ax1 = plt.subplot(121)
sns.distplot(params_c, ax=ax1)
plt.axvline(params_c.mean(), c='r')
plt.title("const parameter")
ax2 = plt.subplot(122)
sns.distplot(params_x1, ax=ax2)
plt.axvline(params_x1.mean(), c='r')
plt.title("x1 parameter")
plt.show()
sp.stats.describe(params_c)
sp.stats.describe(params_x1)
print(result.summary())
sp.stats.probplot(result.resid, plot=plt)
plt.show()
plt.plot(X0, result.resid, 'o')
plt.axhline(y=0, c='k')
plt.xlabel('X1')
plt.ylabel('Residual')
plt.show()
import statsmodels.stats.api as sms
test = sms.omni_normtest(result.resid)
for x in zip(['Chi^2', 'P-value'], test):
print("%-12s: %6.3f" % x)
test = sms.jarque_bera(result.resid)
for x in zip(['Jarque-Bera', 'P-value', 'Skew', 'Kurtosis'], test):
print("%-12s: %6.3f" % x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: b. Accessing Series Elements.
Step3: c. Boolean Indexing.
Step4: Exercise 2
Step5: b. Resampling
Step6: Exercise 3
Step7: Exercise 4
Step8: b. Series Operations
Step9: Exercise 5
Step10: b. DataFrames Manipulation
Step11: Exercise 6
Step12: Exercise 7
Step13: b. DataFrame Manipulation (again)
Step14: Exercise 8
|
<ASSISTANT_TASK:>
Python Code:
# Useful Functions
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
l = np.random.randint(1,100, size=1000)
s = pd.Series(l)
## Your code goes here
## Your code goes here
## Your code goes here
## Your code goes here
## Your code goes here
## Your code goes here
symbol = "CMG"
start = "2012-01-01"
end = "2016-01-01"
prices = get_pricing(symbol, start_date=start, end_date=end, fields="price")
## Your code goes here
## Your code goes here
## Your code goes here
## Your code goes here
print "Summary Statistics"
## Your code goes here
data = get_pricing('GE', fields='open_price', start_date='2016-01-01', end_date='2017-01-01')
## Your code goes here
## Your code goes here
# Rolling mean
## Your code goes here
## Your code goes here
# Rolling Standard Deviation
## Your code goes here
## Your code goes here
l = {'fifth','fourth', 'third', 'second', 'first'}
dict_data = {'a' : [1, 2, 3, 4, 5], 'b' : ['L', 'K', 'J', 'M', 'Z'],'c' : np.random.normal(0, 1, 5)}
## Your code goes here
s1 = pd.Series([2, 3, 5, 7, 11, 13], name='prime')
s2 = pd.Series([1, 4, 6, 8, 9, 10], name='other')
## Your code goes here
## Your code goes here
## Your code goes here
symbol = ["XOM", "BP", "COP", "TOT"]
start = "2012-01-01"
end = "2016-01-01"
prices = get_pricing(symbol, start_date=start, end_date=end, fields="price")
if isinstance(symbol, list):
prices.columns = map(lambda x: x.symbol, prices.columns)
else:
prices.name = symbol
# Check Type of Data for these two.
prices.XOM.head()
prices.loc[:, 'XOM'].head()
## Your code goes here
## Your code goes here
## Your code goes here
# Filter the data for prices to only print out values where
# BP > 30
# XOM < 100
# BP > 30 AND XOM < 100
# The union of (BP > 30 AND XOM < 100) with TOT being non-nan
## Your code goes here
# Add a column for TSLA and drop the column for XOM
## Your code goes here
# Concatenate these dataframes
df_1 = get_pricing(['SPY', 'VXX'], start_date=start, end_date=end, fields='price')
df_2 = get_pricing(['MSFT', 'AAPL', 'GOOG'], start_date=start, end_date=end, fields='price')
## Your code goes here
# Fill GOOG missing data with 0
## Your code goes here
# Print a summary of the 'prices' times series.
## Your code goes here
# Print the natural log returns of the first 10 values
## Your code goes here
# Print the Muliplicative returns
## Your code goes here
# Normlalize the returns and plot
## Your code goes here
# Rolling mean
## Your code goes here
# Rolling standard deviation
## Your code goes here
# Plotting
## Your code goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Clean data
Step2: There is no missing data in the dataset.
Step3: Warning.
Step4: Note that 402 rows have the mode value of '1'.
Step5: Model
Step6: Take home message
Step7: Take home messages
Step8: As said before, I use a random forest classifier since this works best for imbalanced classes
Step9: WOW, the model performed great with default parameters.
Step11: The model barely improves its accuracy.
Step12: The learning curve is oscillating by 0.02
|
<ASSISTANT_TASK:>
Python Code:
# 1 Read dataset
cols = [
'clump thickness',
'uniformity of cell size',
'uniformity of cell shape',
'marginal adhesion',
'single epithelial cell size',
'bare nuclei',
'bland chromatin',
'normal nucleoli',
'mitoses',
'class']
df = pd.read_csv('breast-cancer-wisconsin.data',index_col=0,header=None)
df.index.name = 'id number'
df.columns=cols
df.head()
# Change class labels to 0 and 1 for simplicity
df['class']=df['class'].apply(lambda x: 0 if x == 2 else 1 )
# Is there missing data or NaNs?
df_size = len(df)
df_null = df.isnull().values.any()
#print("Data frame size {}, missing data in {}".format(df_size,len(df_null)))
df_null
#No null values
# See if there are strange values in the dataset:
def visualize_unique_values(df):
# Loop over each column
print('Column','Unique_values')
for icol in range(len(cols)):
# Select a column
df_col = df.ix[:,icol]
# Check unique values
unique_values = df_col.unique()
print(cols[icol],unique_values)
visualize_unique_values(df)
bare_nuclei = df['bare nuclei']
# 1 get data frame with all non missing data:
df2 = bare_nuclei.loc[bare_nuclei != '?']
print(len(df2),len(df))
# Get the mode value
col_mode=eval(df2.mode().values[0])
print("Mode :", col_mode)
#Verify:
bare_nuclei.value_counts()
# Convert data to mode:
df2 = bare_nuclei.apply(lambda x: col_mode if x == '?' else int(x) )
#Check it worked:
print(df2.unique())
# Replace dataset column with clean data
df['bare nuclei'] = df2
# Check this actually worked
visualize_unique_values(df)
y = df['class']
X = df.copy()
del X['class']
class1 = y[y == 0]
class2 = y[y == 1]
print("Class balance\n Class 0: {}\n Class 1: {}\n Ratio: {}".format(len(class1),len(class1),len(class1)/len(class2)))
%matplotlib inline
import seaborn as sns
sns.pairplot(df, hue="class")
#df.head(1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=2, stratify= y )
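# stratify=y keeps the benign/malignant class proportions the same in both splits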
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
model = RandomForestClassifier()
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test,y_pred))
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
# Max number of features by default is sqrt(n_features), which is good to keep to prevent from overfitting.
#If “auto”, then max_features=sqrt(n_features).
rfc = RandomForestClassifier()
params = {'criterion': ['gini','entropy'],'n_estimators': range(10, 50, 10)}
searcher = RandomizedSearchCV(rfc, params, n_jobs=-1, n_iter=4,scoring='f1')
searcher.fit(X_train, y_train) #assuming X and y are your data
import numpy as np
# Utility function to report best scores
def report(results, n_top=1):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
report(searcher.cv_results_)
model = RandomForestClassifier(n_estimators=30, max_depth = None)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test,y_pred))
from sklearn.metrics import f1_score
n_steps=10
step = int(len(X_train)/n_steps)
results=[]
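# Learning curve: train on progressively larger slices of the training set
# and record the F1 score on the fixed test set at each step.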
for ix in range(n_steps):
size_train = step + ix * step
model.fit(X_train[:size_train],y_train[:size_train])
y_pred = model.predict(X_test)
score = f1_score(y_test,y_pred)
results.append([size_train,score])
results = np.array(results)
import matplotlib.pyplot as plt
plt.plot(results[:,0],results[:,1])
plt.xlabel('Training set size')
plt.ylabel('F1 score')
plt.title('Learning curve')
plt.grid(True)
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
class_names=[0,1]
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
from scipy import interp
# #############################################################################
# Data IO and generation
# Import some data to play with
X = X_train
y = y_train
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
#random_state = np.random.RandomState(0)
#X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# #############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = RandomForestClassifier(n_estimators=30, random_state = 0)
#svm.SVC(kernel='linear', probability=True,
# random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y.iloc[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
# Confusion matrix
from sklearn.metrics import confusion_matrix
y_true = [2, 0, 2, 2, 0, 1]
y_pred = [0, 0, 2, 2, 0, 2]
confusion_matrix(y_true, y_pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 Creating an instance of the solow.Model class
Step2: Examples
Step3: 1.2 Defining model parameters
Step4: In addition to the standard parameters $g, n, s, \delta$, one will also need to specify any required parameters for the production function. In order to make sure that parameter values are consistent with the models assumptions some basic validation of the solow.Model.params attribute is done when ever the attribute is set.
Step5: Examples
Step6: 1.3 Other attributes of the solow.Model class
Step7: One can numerically evaluate the intensive output for various values of capital stock (per unit effective labor) as follows...
Step8: The marginal product of capital
Step9: One can numerically evaluate the marginal product of capital for various values of capital stock (per unit effective labor) as follows...
Step10: Equation of motion for capital (per unit effective labor)
Step11: One can numerically evaluate the equation of motion for capital (per unit effective labor) for various values of capital stock (per unit effective labor) as follows...
Step12: 1.4 Sub-classing the solow.Model class
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sympy as sym
import solowpy
solowpy.Model.output?
# define model variables
A, K, L = sym.symbols('A, K, L')
# define production parameters
alpha, sigma = sym.symbols('alpha, sigma')
# define a production function
cobb_douglas_output = K**alpha * (A * L)**(1 - alpha)
rho = (sigma - 1) / sigma
ces_output = (alpha * K**rho + (1 - alpha) * (A * L)**rho)**(1 / rho)
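# Note: as sigma -> 1 (so rho -> 0), the CES form converges to the Cobb-Douglas form.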
solowpy.Model.params?
# these parameters look fishy...why?
default_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.0, 'n': -0.03, 's': 0.15,
'delta': 0.01, 'alpha': 0.33}
# ...raises an AttributeError
model = solowpy.Model(output=cobb_douglas_output, params=default_params)
cobb_douglas_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,
'delta': 0.05, 'alpha': 0.33}
cobb_douglas_model = solowpy.Model(output=cobb_douglas_output,
params=cobb_douglas_params)
ces_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,
'delta': 0.05, 'alpha': 0.33, 'sigma': 0.95}
ces_model = solowpy.Model(output=ces_output, params=ces_params)
solowpy.Model.intensive_output?
ces_model.intensive_output
ces_model.evaluate_intensive_output(np.linspace(1.0, 10.0, 25))
solowpy.Model.marginal_product_capital?
ces_model.marginal_product_capital
ces_model.evaluate_mpk(np.linspace(1.0, 10.0, 25))
solowpy.Model.k_dot?
ces_model.k_dot
ces_model.evaluate_k_dot(np.linspace(1.0, 10.0, 25))
solowpy.cobb_douglas?
cobb_douglas_model = solowpy.CobbDouglasModel(params=cobb_douglas_params)
solowpy.ces?
ces_model = solowpy.CESModel(params=ces_params)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Batch Normalization
Step2: Batch normalization
Step3: Batch Normalization
Step4: Batch Normalization
Step5: Fully Connected Nets with Batch Normalization
Step6: Batchnorm for deep networks
Step7: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
Step8: Batch normalization and initialization
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
returns relative error
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print 'Before batch normalization:'
print ' means: ', a.mean(axis=0)
print ' stds: ', a.std(axis=0)
# Means should be close to zero and stds close to one
print 'After batch normalization (gamma=1, beta=0)'
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print ' mean: ', a_norm.mean(axis=0)
print ' std: ', a_norm.std(axis=0)
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print 'After batch normalization (nontrivial gamma, beta)'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in xrange(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print 'After batch normalization (test-time):'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
# Gradient check batchnorm backward pass
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print 'dx error: ', rel_error(dx_num, dx)
print 'dgamma error: ', rel_error(da_num, dgamma)
print 'dbeta error: ', rel_error(db_num, dbeta)
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print 'dx difference: ', rel_error(dx1, dx2)
print 'dgamma difference: ', rel_error(dgamma1, dgamma2)
print 'dbeta difference: ', rel_error(dbeta1, dbeta2)
print 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
if reg == 0: print
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gcf().set_size_inches(10, 15)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, read the (sample) input tables for matching purposes.
Step2: Then, split the labeled data into development set and evaluation set. Use the development set to select the best learning-based matcher
Step3: Creating and Using a Rule-Based Matcher
Step4: Creating Features
Step5: We observe that there were 20 features generated. As a first step, let's say that we decide to use only 'year' related features.
Step6: Adding Rules
Step7: Using the Matcher to Predict Results
|
<ASSISTANT_TASK:>
Python Code:
# Import py_entitymatching package
import py_entitymatching as em
import os
import pandas as pd
# Get the datasets directory
datasets_dir = em.get_install_path() + os.sep + 'datasets'
path_A = datasets_dir + os.sep + 'dblp_demo.csv'
path_B = datasets_dir + os.sep + 'acm_demo.csv'
path_labeled_data = datasets_dir + os.sep + 'labeled_data_demo.csv'
A = em.read_csv_metadata(path_A, key='id')
B = em.read_csv_metadata(path_B, key='id')
# Load the pre-labeled data
S = em.read_csv_metadata(path_labeled_data,
key='_id',
ltable=A, rtable=B,
fk_ltable='ltable_id', fk_rtable='rtable_id')
S.head()
# Split S into I an J
IJ = em.split_train_test(S, train_proportion=0.5, random_state=0)
I = IJ['train']
J = IJ['test']
brm = em.BooleanRuleMatcher()
# Generate a set of features
F = em.get_features_for_matching(A, B, validate_inferred_attr_types=False)
F.feature_name
# Add two rules to the rule-based matcher
# The first rule has two predicates, one comparing the titles and the other looking for an exact match of the years
brm.add_rule(['title_title_lev_sim(ltuple, rtuple) > 0.4', 'year_year_exm(ltuple, rtuple) == 1'], F)
# This second rule compares the authors
brm.add_rule(['authors_authors_lev_sim(ltuple, rtuple) > 0.4'], F)
brm.get_rule_names()
# Rules can also be deleted from the rule-based matcher
brm.delete_rule('_rule_1')
brm.predict(S, target_attr='pred_label', append=True)
S
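# A possible follow-up (not part of the original notebook): score the rule-based
# predictions against the gold labels, assuming the labeled column is named 'label'.
# eval_result = em.eval_matches(S, 'label', 'pred_label')
# em.print_eval_summary(eval_result)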
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the NuSTAR TLE archive.
Step2: Here is where we define the observing window that we want to use.
|
<ASSISTANT_TASK:>
Python Code:
fname = io.download_occultation_times(outdir='../data/')
print(fname)
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
tstart = '2021-04-29T14:20:00'
tend = '2021-04-29T23:00:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
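# Periods within the requested window when NuSTAR is in sunlight (i.e. not Earth-occulted)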
orbits
# Get the solar parameter
from sunpy.coordinates import sun
angular_size = sun.angular_radius(t='now')
dx = angular_size.arcsec
print(dx)
sun_pa = planning.get_nustar_roll(tstart, 0.)
pa = planning.get_nustar_roll(tstart, 45*u.deg)
print(tstart)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
# Orbit 1 (AR)
offset = [0, 0.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
# Orbit 1 (AR)
offset = [900, -300.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
# Orbit 2a (AR)
offset = [900, -300.]*u.arcsec
ind = 2
orbit = orbits[1].copy()
print(orbit)
oldend = orbit[1]
newend = orbit[1] - TimeDelta(15*60., format = 'sec') # 15 minutes early
orbit[1] = newend
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print(f"Orbit: {ind}a")
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
# Orbit 2b (small thing)
offset = [900, -300.]*u.arcsec
orbit[0] = newend
orbit[1] = oldend
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print(f"Orbit: {ind}b")
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
# Orbit 3, 4 (PSP Footpoints)
offset = [900, -300.]*u.arcsec
for oi in [2, 3]:
ind = oi+1
orbit = orbits[oi]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
test1 = SkyCoord(289.3792274160115, -22.304595055979675, unit = 'deg')
orb1 = SkyCoord(289.3855, -22.3051, unit = 'deg')
orb1.separation(test1)
orbit
import sunpy
sunpy.__version__
test1 = SkyCoord(289.898451566591, -22.158432904027155 , unit = 'deg')
orb1 = SkyCoord(289.9047, -22.1589, unit = 'deg')
orb1.separation(test1)
sun_pa = planning.get_nustar_roll(tstart, 0.)
pa = planning.get_nustar_roll(tstart, 45*u.deg)
offset = [0, 0.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sun_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
# Orbit 1 (AR)
offset = [900, -300.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
planning.make_test_region(sky_pos[0], sky_pos[1], pa, sun_pos[0], sun_pos[1], sun_pa)
print(pa)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Acoustics
Step5: Burgers
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from IPython.display import FileLink
from exact_solvers import acoustics_demos
def make_bump_animation_html(numframes, file_name):
video_html = acoustics_demos.bump_animation(numframes)
f = open(file_name,'w')
f.write('<html>\n')
file_name = 'acoustics_bump_animation.html'
descr = <h1>Acoustics Bump Animation</h1>
This animation is to accompany
<a href="http://www.clawpack.org/riemann_book/html/Acoustics.html">this
notebook</a>,\n from the book <a
href="http://www.clawpack.org/riemann_book/index.html">Riemann Problems and
Jupyter Solutions</a>\n
f.write(descr)
f.write("<p>")
f.write(video_html)
print("Created ", file_name)
f.close()
file_name = 'html_animations/acoustics_bump_animation.html'
anim = make_bump_animation_html(numframes=50, file_name=file_name)
FileLink(file_name)
from exact_solvers import burgers_demos
from importlib import reload
reload(burgers_demos)
video_html = burgers_demos.bump_animation(numframes = 50)
file_name = 'html_animations/burgers_animation0.html'
f = open(file_name,'w')
f.write('<html>\n')
descr = <h1>Burgers' Equation Animation</h1>
This animation is to accompany
<a href="http://www.clawpack.org/riemann_book/html/Burgers.html">this
notebook</a>,\n from the book <a
href="http://www.clawpack.org/riemann_book/index.html">Riemann Problems and
Jupyter Solutions</a>\n
<p>
Burgers' equation with hump initial data, evolving into a shock wave
followed by a rarefaction wave.
f.write(descr)
f.write("<p>")
f.write(video_html)
print("Created ", file_name)
f.close()
FileLink(file_name)
def make_burgers_animation_html(ql, qm, qr, file_name):
video_html = burgers_demos.triplestate_animation(ql,qm,qr,numframes=50)
f = open(file_name,'w')
f.write('<html>\n')
descr = <h1>Burgers' Equation Animation</h1>
This animation is to accompany
<a href="http://www.clawpack.org/riemann_book/html/Burgers.html">this
notebook</a>,\n from the book <a
href="http://www.clawpack.org/riemann_book/index.html">Riemann Problems and
Jupyter Solutions</a>\n
<p>
Burgers' equation with three constant states as initial data,\n
ql = %.1f, qm = %.1f, qr = %.1f % (ql,qm,qr)
f.write(descr)
f.write("<p>")
f.write(video_html)
print("Created ", file_name)
f.close()
file_name = 'html_animations/burgers_animation1.html'
make_burgers_animation_html(4., 2., 0., file_name)
FileLink(file_name)
file_name = 'html_animations/burgers_animation2.html'
make_burgers_animation_html(4., -1.5, 0.5, file_name)
FileLink(file_name)
file_name = 'html_animations/burgers_animation3.html'
make_burgers_animation_html(-1., 3., -2., file_name)
FileLink(file_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This was developed using Python 3.6 (Anaconda) and TensorFlow version
Step2: Load Data
Step3: The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial.
Step4: The class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test-set, so we calculate it now.
Step5: Data Dimensions
Step6: Helper-functions for plotting images
Step7: Function used to plot 10 images in a 2x5 grid.
Step8: Function used to plot a single image.
Step9: Plot a few images to see if data is correct
Step10: TensorFlow Graph
Step11: The convolutional layers expect x to be encoded as a 4-rank tensor so we have to reshape it so its shape is instead [num_images, img_height, img_width, num_channels]. Note that img_height == img_width == img_size and num_images can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is
Step12: Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable x. The shape of this placeholder variable is [None, num_classes] which means it may hold an arbitrary number of labels and each label is a vector of length num_classes which is 10 in this case.
Step13: We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point.
Step14: Neural Network
Step15: The input image is then input to the first convolutional layer, which has 16 filters each of size 5x5 pixels. The activation-function is the Rectified Linear Unit (ReLU) described in more detail in Tutorial #02.
Step16: After the convolution we do a max-pooling which is also described in Tutorial #02.
Step17: Then we make a second convolutional layer, also with max-pooling.
Step18: The output then needs to be flattened so it can be used in fully-connected (aka. dense) layers.
Step19: We can now add fully-connected (or dense) layers to the neural network.
Step20: We need the neural network to classify the input images into 10 different classes. So the final fully-connected layer has num_classes=10 output neurons.
Step21: The outputs of the final fully-connected layer are sometimes called logits, so we have a convenience variable with that name which we will also use further below.
Step22: We use the softmax function to 'squash' the outputs so they are between zero and one, and so they sum to one.
Step23: This tells us how likely the neural network thinks the input image is of each possible class. The one that has the highest value is considered the most likely so its index is taken to be the class-number.
Step24: Loss-Function to be Optimized
Step25: We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications.
Step26: Optimization Method
Step27: Classification Accuracy
Step28: The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers.
Step29: Optimize the Neural Network
Step30: Initialize variables
Step31: Helper-function to perform optimization iterations
Step32: This function performs a number of optimization iterations so as to gradually improve the variables of the neural network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations.
Step33: Helper-function to plot example errors
Step34: Helper-function to plot confusion matrix
Step35: Helper-function for showing the performance
Step36: Performance before any optimization
Step37: Performance after 10,000 optimization iterations
Step38: Optimizing the Input Images
Step40: Helper-function for finding the input image
Step42: This next function finds the images that maximize the first 10 features of a layer, by calling the above function 10 times.
Step43: First Convolutional Layer
Step44: Note how these are very simple shapes such as lines and angles. Some of these images may be completely white, which suggests that those features of the neural network are perhaps unused, so the number of features could be reduced in this layer.
Step45: Final output layer
Step46: Note how the predicted class indeed becomes 2 already within the first few iterations so the optimization is working as intended. Also note how the loss-measure is increasing rapidly until it apparently converges. This is because the loss-measure is actually just the value of the feature or neuron that we are trying to maximize. Because this is the logits-layer prior to the softmax, these values can potentially be infinitely high, but they are limited because we limit the image-values between 0 and 1.
Step47: Although some of the curves do hint somewhat at the digit 2, it is hard for a human to see why the neural network believes this is the optimal image for the digit 2. This can only be understood when the optimal images for the remaining digits are also shown.
Step48: These images may vary each time you run the optimization. Some of the images can be seen to somewhat resemble the hand-written digits. But the other images are often impossible to recognize and it is hard to understand why the neural network thinks these are the optimal input images for those digits.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import math
tf.__version__
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
data.test.cls = np.argmax(data.test.labels, axis=1)
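# The argmax above recovers integer class numbers from the One-Hot encoded test labels.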
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1
# Number of classes, one class for each of 10 digits.
num_classes = 10
def plot_images(images, cls_true, cls_pred=None):
assert len(images) == len(cls_true) == 9
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Plot image.
ax.imshow(images[i].reshape(img_shape), cmap='binary')
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def plot_images10(images, smooth=True):
# Interpolation type.
if smooth:
interpolation = 'spline16'
else:
interpolation = 'nearest'
# Create figure with sub-plots.
fig, axes = plt.subplots(2, 5)
# Adjust vertical spacing.
fig.subplots_adjust(hspace=0.1, wspace=0.1)
# For each entry in the grid.
for i, ax in enumerate(axes.flat):
# Get the i'th image and only use the desired pixels.
img = images[i, :, :]
# Plot the image.
ax.imshow(img, interpolation=interpolation, cmap='binary')
# Remove ticks.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def plot_image(image):
plt.imshow(image, interpolation='nearest', cmap='binary')
plt.xticks([])
plt.yticks([])
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
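# 'x' holds a batch of flattened 28x28 input images; None lets the batch size vary.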
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)
net = x_image
net = tf.layers.conv2d(inputs=net, name='layer_conv1', padding='same',
filters=16, kernel_size=5, activation=tf.nn.relu)
net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
net = tf.layers.conv2d(inputs=net, name='layer_conv2', padding='same',
filters=36, kernel_size=5, activation=tf.nn.relu)
net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
net = tf.contrib.layers.flatten(net)
# This should eventually be replaced by:
# net = tf.layers.flatten(net)
net = tf.layers.dense(inputs=net, name='layer_fc1',
units=128, activation=tf.nn.relu)
net = tf.layers.dense(inputs=net, name='layer_fc_out',
units=num_classes, activation=None)
logits = net
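# 'logits' are the raw, unnormalized class scores from the final fully-connected layer.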
y_pred = tf.nn.softmax(logits=logits)
y_pred_cls = tf.argmax(y_pred, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=logits)
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
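# Adam minimizes the mean cross-entropy over the batch with a learning rate of 1e-4.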
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.global_variables_initializer())
train_batch_size = 64
# Counter for total number of iterations performed so far.
total_iterations = 0
def optimize(num_iterations):
# Ensure we update the global variable rather than a local copy.
global total_iterations
for i in range(total_iterations,
total_iterations + num_iterations):
# Get a batch of training examples.
# x_batch now holds a batch of images and
# y_true_batch are the true labels for those images.
x_batch, y_true_batch = data.train.next_batch(train_batch_size)
# Put the batch into a dict with the proper names
# for placeholder variables in the TensorFlow graph.
feed_dict_train = {x: x_batch,
y_true: y_true_batch}
# Run the optimizer using this batch of training data.
# TensorFlow assigns the variables in feed_dict_train
# to the placeholder variables and then runs the optimizer.
session.run(optimizer, feed_dict=feed_dict_train)
# Print status every 100 iterations.
if i % 100 == 0:
# Calculate the accuracy on the training-set.
acc = session.run(accuracy, feed_dict=feed_dict_train)
# Message for printing.
msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
# Print it.
print(msg.format(i + 1, acc))
# Update the total number of iterations performed.
total_iterations += num_iterations
def plot_example_errors(cls_pred, correct):
# This function is called from print_test_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# correct is a boolean array whether the predicted class
# is equal to the true class for each image in the test-set.
# Negate the boolean array.
incorrect = (correct == False)
# Get the images from the test-set that have been
# incorrectly classified.
images = data.test.images[incorrect]
# Get the predicted classes for those images.
cls_pred = cls_pred[incorrect]
# Get the true classes for those images.
cls_true = data.test.cls[incorrect]
# Plot the first 9 images.
plot_images(images=images[0:9],
cls_true=cls_true[0:9],
cls_pred=cls_pred[0:9])
def plot_confusion_matrix(cls_pred):
# This is called from print_test_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# Get the true classifications for the test-set.
cls_true = data.test.cls
# Get the confusion matrix using sklearn.
cm = confusion_matrix(y_true=cls_true,
y_pred=cls_pred)
# Print the confusion matrix as text.
print(cm)
# Plot the confusion matrix as an image.
plt.matshow(cm)
# Make various adjustments to the plot.
plt.colorbar()
tick_marks = np.arange(num_classes)
plt.xticks(tick_marks, range(num_classes))
plt.yticks(tick_marks, range(num_classes))
plt.xlabel('Predicted')
plt.ylabel('True')
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
# Split the test-set into smaller batches of this size.
test_batch_size = 256
def print_test_accuracy(show_example_errors=False,
show_confusion_matrix=False):
# Number of images in the test-set.
num_test = len(data.test.images)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
cls_pred = np.zeros(shape=num_test, dtype=np.int)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_test:
# The ending index for the next batch is denoted j.
j = min(i + test_batch_size, num_test)
# Get the images from the test-set between index i and j.
images = data.test.images[i:j, :]
# Get the associated labels.
labels = data.test.labels[i:j, :]
# Create a feed-dict with these images and labels.
feed_dict = {x: images,
y_true: labels}
# Calculate the predicted class using TensorFlow.
cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Convenience variable for the true class-numbers of the test-set.
cls_true = data.test.cls
# Create a boolean array whether each image is correctly classified.
correct = (cls_true == cls_pred)
# Calculate the number of correctly classified images.
# When summing a boolean array, False means 0 and True means 1.
correct_sum = correct.sum()
# Classification accuracy is the number of correctly classified
# images divided by the total number of images in the test-set.
acc = float(correct_sum) / num_test
# Print the accuracy.
msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
print(msg.format(acc, correct_sum, num_test))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_example_errors(cls_pred=cls_pred, correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusion_matrix(cls_pred=cls_pred)
print_test_accuracy()
%%time
optimize(num_iterations=10000)
print_test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
def get_conv_layer_names():
graph = tf.get_default_graph()
# Create a list of names for the operations in the graph
# for the Inception model where the operator-type is 'Conv2D'.
names = [op.name for op in graph.get_operations() if op.type=='Conv2D']
return names
conv_names = get_conv_layer_names()
conv_names
len(conv_names)
def optimize_image(conv_id=None, feature=0,
num_iterations=30, show_progress=True):
"""
Find an image that maximizes the feature
given by the conv_id and feature number.
Parameters:
conv_id: Integer identifying the convolutional layer to
maximize. It is an index into conv_names.
If None then use the last fully-connected layer
before the softmax output.
feature: Index into the layer for the feature to maximize.
num_iterations: Number of optimization iterations to perform.
show_progress: Boolean whether to show the progress.
"""
# Create the loss-function that must be maximized.
if conv_id is None:
# If we want to maximize a feature on the last layer,
# then we use the fully-connected layer prior to the
# softmax-classifier. The feature no. is the class-number
# and must be an integer between 1 and 1000.
# The loss-function is just the value of that feature.
loss = tf.reduce_mean(logits[:, feature])
else:
# If instead we want to maximize a feature of a
# convolutional layer inside the neural network.
# Get the name of the convolutional operator.
conv_name = conv_names[conv_id]
# Get the default TensorFlow graph.
graph = tf.get_default_graph()
# Get a reference to the tensor that is output by the
# operator. Note that ":0" is added to the name for this.
tensor = graph.get_tensor_by_name(conv_name + ":0")
# The loss-function is the average of all the
# tensor-values for the given feature. This
# ensures that we generate the whole input image.
# You can try and modify this so it only uses
# a part of the tensor.
loss = tf.reduce_mean(tensor[:,:,:,feature])
# Get the gradient for the loss-function with regard to
# the input image. This creates a mathematical
# function for calculating the gradient.
gradient = tf.gradients(loss, x_image)
# Generate a random image of the same size as the raw input.
# Each pixel is a small random value between 0.45 and 0.55,
# which is the middle of the valid range between 0 and 1.
image = 0.1 * np.random.uniform(size=img_shape) + 0.45
# Perform a number of optimization iterations to find
# the image that maximizes the loss-function.
for i in range(num_iterations):
# Reshape the array so it is a 4-rank tensor.
img_reshaped = image[np.newaxis,:,:,np.newaxis]
# Create a feed-dict for inputting the image to the graph.
feed_dict = {x_image: img_reshaped}
# Calculate the predicted class-scores,
# as well as the gradient and the loss-value.
pred, grad, loss_value = session.run([y_pred, gradient, loss],
feed_dict=feed_dict)
# Squeeze the dimensionality for the gradient-array.
grad = np.array(grad).squeeze()
# The gradient now tells us how much we need to change the
# input image in order to maximize the given feature.
# Calculate the step-size for updating the image.
# This step-size was found to give fast convergence.
# The addition of 1e-8 is to protect from div-by-zero.
step_size = 1.0 / (grad.std() + 1e-8)
# Update the image by adding the scaled gradient
# This is called gradient ascent.
image += step_size * grad
# Ensure all pixel-values in the image are between 0 and 1.
image = np.clip(image, 0.0, 1.0)
if show_progress:
print("Iteration:", i)
# Convert the predicted class-scores to a one-dim array.
pred = np.squeeze(pred)
# The predicted class for the Inception model.
pred_cls = np.argmax(pred)
# The score (probability) for the predicted class.
cls_score = pred[pred_cls]
# Print the predicted score etc.
msg = "Predicted class: {0}, score: {1:>7.2%}"
print(msg.format(pred_cls, cls_score))
# Print statistics for the gradient.
msg = "Gradient min: {0:>9.6f}, max: {1:>9.6f}, stepsize: {2:>9.2f}"
print(msg.format(grad.min(), grad.max(), step_size))
# Print the loss-value.
print("Loss:", loss_value)
# Newline.
print()
return image.squeeze()
def optimize_images(conv_id=None, num_iterations=30):
"""
Find 10 images that maximize the 10 first features in the layer
given by the conv_id.
Parameters:
conv_id: Integer identifying the convolutional layer to
maximize. It is an index into conv_names.
If None then use the last layer before the softmax output.
num_iterations: Number of optimization iterations to perform.
"""
# Which layer are we using?
if conv_id is None:
print("Final fully-connected layer before softmax.")
else:
print("Layer:", conv_names[conv_id])
# Initialize the array of images.
images = []
# For each feature do the following.
for feature in range(0,10):
print("Optimizing image for feature no.", feature)
# Find the image that maximizes the given feature
# for the network layer identified by conv_id (or None).
image = optimize_image(conv_id=conv_id, feature=feature,
show_progress=False,
num_iterations=num_iterations)
# Squeeze the dim of the array.
image = image.squeeze()
# Append to the list of images.
images.append(image)
# Convert to numpy-array so we can index all dimensions easily.
images = np.array(images)
# Plot the images.
plot_images10(images=images)
optimize_images(conv_id=0)
optimize_images(conv_id=1)
image = optimize_image(conv_id=None, feature=2,
num_iterations=10, show_progress=True)
plot_image(image)
optimize_images(conv_id=None)
# This has been commented out in case you want to modify and experiment
# with the Notebook without having to restart it.
# session.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 5T_SQL practice for data analysis (4) - SQL Advanced
Step3: Revenue from the "R" and "PG-13" ratings within Store 1's revenue by rating
Step6: Revenue per actor
|
<ASSISTANT_TASK:>
Python Code:
import pymysql
db = pymysql.connect(
"db.fastcamp.us",
"root",
"dkstncks",
"sakila",
charset='utf8',
)
rental_df = pd.read_sql("SELECT * FROM rental;", db)
inventory_df = pd.read_sql("SELECT * FROM inventory;", db)
film_df = pd.read_sql("SELECT * FROM film;", db)
film_category_df = pd.read_sql("SELECT * FROM film_category;", db)
category_df = pd.read_sql("SELECT * FROM category;", db)
rental_df.head(1)
inventory_df.head(1)
film_df.head(1)
film_category_df.head(1)
category_df.head(1)
SQL_QUERY = """
SELECT
c.category_id category_id,
c.name category_name,
COUNT(*) rentals_per_category
FROM
rental r
JOIN inventory i ON r.inventory_id = i.inventory_id
JOIN film f ON f.film_id = i.film_id
JOIN film_category fc ON fc.film_id = f.film_id
JOIN category c ON fc.category_id = c.category_id
WHERE
c.name IN ("Family", "Sports", "Comedy")
GROUP BY
category_id
ORDER BY rentals_per_category DESC
;
"""
pd.read_sql(SQL_QUERY, db)
payment_df = pd.read_sql("SELECT * FROM payment;", db)
film_df.head(1)
payment_df.head(1)
inventory_df.head(1)
rental_df.head(1)
SQL_QUERY = """
SELECT
i.store_id store_id,
f.rating rating,
SUM(p.amount) total_revenue
FROM
payment p
JOIN rental r ON p.rental_id = r.rental_id
JOIN inventory i ON r.inventory_id = i.inventory_id
JOIN film f ON i.film_id = f.film_id
WHERE
i.store_id = 1
AND f.rating IN ("PG-13", "R")
GROUP BY
store_id,
rating
;
"""
pd.read_sql(SQL_QUERY, db)
# 1. Revenue per film - rental, film, inventory
REVENUE_PER_FILM_SQL_QUERY = """
SELECT
f.film_id film_id,
COUNT(*) * f.rental_rate revenue
FROM
rental r
JOIN inventory i ON r.inventory_id = i.inventory_id
JOIN film f ON i.film_id = f.film_id
GROUP BY
film_id
;
"""
pd.read_sql(REVENUE_PER_FILM_SQL_QUERY, db)
# 2. Revenue per actor - actor, film_actor
SQL_QUERY = """
SELECT
a.actor_id,
a.last_name last_name,
a.first_name first_name,
SUM(rpf.revenue) revenue_per_actor
FROM ({REVENUE_PER_FILM_SQL_QUERY}) AS rpf
JOIN film_actor fa ON rpf.film_id = fa.film_id
JOIN actor a ON fa.actor_id = a.actor_id
GROUP BY
actor_id
ORDER BY revenue_per_actor DESC
;
""".format(
REVENUE_PER_FILM_SQL_QUERY=REVENUE_PER_FILM_SQL_QUERY.replace(";",""),
)
pd.read_sql(SQL_QUERY, db)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: cegads.Scenario
Step2: The default ScenarioFactory inherits from the ECUK class which loads the full ECUK dataset. The ScenarioFactory loads data from the ECUK tables 3.08 (the number of households in UK by year), 3.10 (the total consumption of each appliance category by year) and 3.12 (appliance ownership by year). It calculates the number of appliances per household and the consumption per appliance for all available years.
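The two derived quantities are simple ratios of those three tables. A minimal sketch of the arithmetic, assuming households, ownership and total_consumption are pandas Series indexed by year (hypothetical names and values, not the actual ECUK attributes):
import pandas as pd
households = pd.Series({2012: 26.4e6, 2013: 26.7e6})          # table 3.08: households per year (assumed values)
ownership = pd.Series({2012: 25.0e6, 2013: 25.5e6})           # table 3.12: appliances owned (assumed values)
total_consumption = pd.Series({2012: 4.1e12, 2013: 4.0e12})   # table 3.10: Wh per year (assumed values)
appliances_per_household = ownership / households
consumption_per_appliance = total_consumption / ownership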
Step3: The ScenarioFactory is callable directly. Calling the factory with an integer year value will return a Scenario instance loaded with data from the requested year.
Step4: We can inspect the underlying data for the given year. Here I extract the data and create a plot showing appliances per household. For most appliances the number per household is less than 1.0.
Step5: Generating Appliance instances
Step6: The appliances are represented above by three attributes
Step7: Appliance simulation
Step8: Plotting the results shows the square wave form of the simulation. There is one duty cycle each day. The width of the cycle is determined by the user, the height is calculated from the cycle width and the daily consumption figure. The timing of the cycle is determined by drawing randomly from the overall consumption distribution.
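A minimal sketch of how a single duty cycle could be turned into such a square wave at one-minute resolution; daily_total (Wh) and duty_cycle (minutes) play the same roles as in the appliance objects, but the start time here is fixed rather than drawn from the consumption distribution, and all values are assumed:
import numpy as np
daily_total = 500.0                             # Wh consumed per day (assumed)
duty_cycle = 60                                 # cycle width in minutes (assumed)
start = 8 * 60                                  # start the cycle at 08:00
profile = np.zeros(24 * 60)                     # one value per minute of the day
profile[start:start + duty_cycle] = daily_total / duty_cycle   # Wh per minute
power_watts = profile * 60                      # Wh/min -> W, as in the plots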
Step9: Generating Household instances
Step10: Each 2-tuple represents an appliance type and a duty_cycle. That is, the width of the square wave to be generated by the appliance during the simulation. Passing in these data as arguments to the Scenario.household() method will define the list of appliances to consider.
Step11: Multiple-household simulation
Step12: Now, I can plot the data from some of these households.
Step13: Combining the results from all the households produces a bit of a mess. We can see that the overall usage pattern is structured.
Step14: The mean consumption of each appliance type shows that there are differences between appliances with washer-dryers consuming the most and washing machines the least. This is a reflection of the data in ECUK table 3.10.
Step15: Discussion
Step16: We can now look at the average daily consumption of each appliance type in our simulation to see if they match.
Step17: We might expect these figures to match precisely but in fact they don't. This is due to an artifact in the modelling process. Sometimes a duty cycle begins near the end of the simulation period (or ends near the beginning) and so consumption for that cycle actually passes over the edge of the dataset. We can expect that simulated consumption should never exceed these figures. It is only a problem at the very edges of the simulation period (though at the boundary between days it is possible for an appliance to be running two duty cycles at the same time). This can be improved.
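When comparing these daily averages, one simple mitigation is to drop the first and last simulated day before averaging, since duty cycles can spill over the edges of the simulation period; a minimal sketch, assuming sim is one of the per-appliance simulation Series created above:
first_day = sim.index[0].date()
last_day = sim.index[-1].date()
interior = sim[(sim.index.date > first_day) & (sim.index.date < last_day)]
daily_consumption = interior.resample('D').sum().mean()   # Wh per full interior day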
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import pandas as pd
from cegads import ScenarioFactory
factory = ScenarioFactory()
wet_appliance_keys = ['Washing Machine', 'Dishwasher', 'Tumble Dryer', 'Washer-dryer']
df = factory._data.stack().unstack(level=0)
f, [ax1,ax2] = plt.subplots(1, 2, figsize=(12, 4))
for key in wet_appliance_keys:
ax1.plot(df.unstack(level=0).index, df[key]['consumption_per_appliance'], label=key)
ax2.plot(df.unstack(level=0).index, df[key]['appliances_per_household'], label=key)
plt.suptitle("Wet Appliances")
ax2.set_title("appliance numbers per household")
ax2.set_ylabel("appliances per household")
ax2.legend(loc=6, fontsize=8)
ax1.set_title("consumption per appliance")
ax1.set_ylabel("consumption per appliance (Wh/year)")
ax1.legend(loc=1, fontsize=8)
plt.show()
year = 2013
scenario = factory(year)
f, [ax2, ax1] = plt.subplots(1, 2, figsize=(12, 4), sharex=True)
ind = np.arange(len(scenario.index))
width = 0.65
ax1.bar(ind, scenario.appliances_per_household, width, color="red")
ax2.bar(ind, scenario.consumption_per_appliance, width, color="blue")
for ax in [ax1,ax2]:
ax.set_xticks(ind+width/2.)
ax.set_xticklabels(scenario.index, rotation=90)
ax.set_xlim(0, len(scenario._data.index))
ax1.axhline(y=1, ls="--", color="black", lw=1)
ax1.set_ylabel("appliances per household")
ax2.set_ylabel("consumption per\nappliance (Wh)")
plt.tight_layout()
test_appliances = [scenario.appliance(app, 60) for app in wet_appliance_keys]
test_appliances
f, [ax1, ax2] = plt.subplots(1, 2, figsize=(12, 4))
for app in test_appliances:
ax1.plot(app.profile.index, app.profile * 100, label=app.name)
ax2.plot(app.profile.index, app.profile.diff()*60*app.daily_total, label=app.name) # * 60 for Wh -> W conversion
ax1.legend(loc=2, fontsize=8)
ax2.legend(loc=2, fontsize=8)
ax1.set_title("cumulative distribution")
ax2.set_title("actual consumption")
ax1.set_ylabel('cumulative frequency (%)')
ax2.set_ylabel('load (W)')
for ax in [ax1, ax2]:
ax.set_xlabel('time')
ax.xaxis.set_major_locator(mpl.dates.HourLocator(interval=3))
ax.xaxis.set_major_formatter(mpl.dates.DateFormatter("%H:%M"))
ax1.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%.0f%%"))
plt.show()
freq = "1Min"
start = datetime.datetime(year, 1, 1)
days = 7
test_simulations = [app.simulation(days, freq, start=start) for app in test_appliances]
f, ax = plt.subplots(1, 1, figsize=(18, 3))
for app, sim in zip(test_appliances, test_simulations):
ax.plot(sim.index, sim*60, label=app.name)
ax.legend(fontsize=10, loc="best")
ax.xaxis.set_major_formatter(mpl.dates.DateFormatter("%d-%b"))
ax.set_ylabel("Consumption (W)")
ax.set_ylim(top=ax.get_ylim()[1]*1.2)
ax.grid()
plt.show()
appliances_to_consider = [
('Washing Machine', 80),
('Dishwasher', 100),
('Tumble Dryer', 120),
('Washer-dryer', 180)
]
n = 150
households = [scenario.household(appliances_to_consider) for i in range(n)]
for h in households[:3]:
print(h)
names = ["household {:03}".format(i + 1) for i, h in enumerate(households) if len(h)]
result = pd.concat([h.simulation(days, freq, start=start) for h in households if len(h)], keys=names, axis=1)
result.columns.names = ['household', 'appliance']
loc = mpl.dates.DayLocator(interval=2)
fmt = mpl.dates.DateFormatter("%d-%b")
xax, yax = 4, 4
f, axes = plt.subplots(xax, yax, sharex=True, sharey=True, figsize=(12, 6))
for row, ax_row in enumerate(axes):
for col, ax in enumerate(ax_row):
name = names[row*yax + col]
for key in result[name]:
ax.plot(result.index, result[name][key])
ax.set_title(name)
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_major_formatter(fmt)
plt.tight_layout()
plt.show()
f, ax = plt.subplots(figsize=(12, 2))
ax.plot(result.index, result*60, alpha=0.5, lw=0.25)
plt.show()
df = result.copy()
df.columns = df.columns.droplevel()
appliance_mean_profile = df.groupby(df.columns, axis=1).mean()
f, ax = plt.subplots(figsize=(12, 4))
for key in appliance_mean_profile:
ax.plot(appliance_mean_profile.index, appliance_mean_profile[key]*60, alpha=0.75, label="{}{}".format(key[:-2], 's'))
ax.plot(df.index, df.mean(axis=1)*60, color="black", lw=1.5) #average across all households
ax.set_ylabel("appliance load (W)")
plt.legend(fontsize=10)
plt.show()
from cegads import ECUK
ecuk = ECUK()
for device in wet_appliance_keys:
print("{:20} {}".format(device, ecuk(2013, device).consumption_per_appliance / 365))
totals = df.sum() / 7 # total consumption divided by 7 for each appliance
totals.groupby(totals.index).mean() # average across all appliance types
sims = [app.simulation(365, "30Min") for app in test_appliances]
shapes = [sim.groupby(sim.index.time).mean() for sim in sims]
f, axes = plt.subplots(1, len(sims), figsize=(16, 2.5), sharey=True)
for ax, app, shape in zip(axes, test_appliances, shapes):
i = [datetime.datetime.combine(app.profile.index[0], t) for t in shape.index]
ax.plot_date(i, shape * 2, color='red', label="simulation", ls="-", marker=None) # convert Wh per half-hour to W (*2)
ax.plot(app.profile.index, app.profile.diff() * app.daily_total * 60, color="black", lw=1.5, label="model")
ax.set_title(app.name)
ax.xaxis.set_major_locator(mpl.dates.HourLocator(interval=6))
ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M'))
ax.set_ylabel("consumption (W)")
ax.legend(loc="best", fontsize=8)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The requests module is used to make HTTP requests; it will be useful for requesting further pages of Facebook content.
Step2: 'me/feed'
Step3: Exercise 2 - Modify the program from aula6-parte5-recuperar ('me/feed') and print, in addition to the story, the name, type and creation time.
Step4: Note that we only got back 25 results, but our news feed still has a lot of information left to retrieve!
Step5: In the same way as before, we can request the next page until the desired amount is reached.
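A minimal sketch of such a paging loop with an explicit cut-off (the api and requests objects mirror the cells below; max_items is an assumed limit):
max_items = 100
items = []
page = api.get_object('me/home')
while page['data'] and len(items) < max_items:
    items.extend(page['data'])
    try:
        page = requests.get(page['paging']['next']).json()
    except KeyError:  # no further pages
        break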
|
<ASSISTANT_TASK:>
Python Code:
import facebook
import simplejson as json
import requests
req = requests.get('http://python.org')
req.status_code # If the code is 200, the request was successful.
#req.text
'Python' in req.text
req.close()
import facebook
access_token = 'EAACEdEose0cBAAFGsk2U0Jo1Kn9GZCWuXoMwflMusq2ajIqwcF3VU9vEp2M8y5ZB3stHJ5AAShfQvL72JYV8zGQXGvN5996EXDn9FV2DJvS8FTflaak0UuC6ZAm5HPjI2gsLRVEzdYlftGL93ZBWNVJuWzRnENYY7a3dpALP0AZDZD'
api = facebook.GraphAPI(access_token, version='2.3')
noticias = api.get_object('me/feed')
#print(json.dumps(noticias, indent=4))
for item in range(0, len(noticias['data'])):
try:
print(item, '--->', noticias['data'][item]['story'])
except:
pass
noticias = api.get_object('me/feed')
while True:
try:
for item in range(0, len(noticias['data'])):
try:
print(item, '--->', noticias['data'][item]['story'])
except:
pass
noticias = requests.get(noticias['paging']['next']).json()
except Exception as e:
print(e)
break
feed_noticias = api.get_object('me/home')
len(feed_noticias['data'])
for item in range(0, len(feed_noticias['data'])):
try:
print(item, '---->', feed_noticias['data'][item]['name'])
except:
pass
feed_noticias['data'][1].keys()
feed_noticias['data'][1]['type']
feed_noticias['data'][1]['name']
#feed_noticias['data'][1]['application']
feed_noticias['data'][1]['updated_time']
feed_noticias['data'][1]['created_time']
# feed_noticias['data'][1]['comments']
feed_noticias['data'][1]['likes']
feed_noticias['paging']
access_token = 'EAACEdEose0cBANbSsjf7TIxMNSvoGZCOZCa2Nbfjso2ZAQwKCrmotS5wEXPZCNvHx7zxqLU30Mn5J9cJdtTDJtALuSfn4mSSmCuASZAPTQy2DE3LSeboUzfZCzTIvJPrcR1E4ScwKyDAPN2mOPx1hNwfKPyYdikzGc32tauaKGMQZDZD'
amigos = api.get_connections("me", "friends")
todos_amigos = []
while True:
try:
for amigo in amigos['data']:
todos_amigos.append(amigo['name'])
amigos = requests.get(amigos['paging']['next']).json()
except KeyError:
break
print(todos_amigos)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot several selected histograms for the train data
Step2: Before we explore classification with histograms, let's try the default approach using KNN-DTW
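At its core this is a nearest-neighbour search whose distance is the DTW cost between two raw sequences; a minimal 1-NN sketch, assuming the same fastdtw package that KnnDtwClassifier uses internally is available:
import numpy as np
from fastdtw import fastdtw
def dtw_1nn(query, train_data, train_labels):
    # label the query with the label of its closest training sequence under DTW
    distances = [fastdtw(query, seq)[0] for seq in train_data]
    return train_labels[int(np.argmin(distances))]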
Step3: Bingo! All classifications are correct! Though it took some time for executing KNN-DTW classification ... Let's go deeper into the details of KNN-DTW for our accelerometer data and use KNN=3 with distance information. This time we will measure the performance as well.
Step4: All results are correct as well! But the execution time is not the best - almost 1 min 30 seconds ... So let's try a faster alternative for classification, one that doesn't depend on DTW and its execution time
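Since each recording is already summarised as a fixed-length 20-bin histogram, any standard scikit-learn classifier can be applied to those vectors; a minimal sketch with a random forest (one of the estimators CommonClassifier also tries), assuming the histogram lists built above:
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=50)
rf.fit(train_data_hist, train_labels)
print(rf.predict(test_data_hist))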
Step7: 19.1 ms for the total execution - much better!
|
<ASSISTANT_TASK:>
Python Code:
from tinylearn import KnnDtwClassifier
from tinylearn import CommonClassifier
import pandas as pd
import numpy as np
import os
train_labels = []
test_labels = []
train_data_raw = []
train_data_hist = []
test_data_raw = []
test_data_hist = []
# Utility function for normalizing numpy arrays
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
# Loading all data for training and testing from TXT files
def load_data():
for d in os.listdir("data"):
for f in os.listdir(os.path.join("data", d)):
if f.startswith("TRAIN"):
train_labels.append(d)
tr = normalize(np.ravel(pd.read_csv(os.path.join("data", d, f),
delim_whitespace=True,
header=None)))
train_data_raw.append(tr)
train_data_hist.append(np.histogram(tr, bins=20)[0])
else:
test_labels.append(d)
td = normalize(np.ravel(pd.read_csv(os.path.join("data", d, f),
delim_whitespace=True,
header=None)))
test_data_raw.append(td)
test_data_hist.append(np.histogram(td, bins=20)[0])
load_data()
%matplotlib inline
import matplotlib.pyplot as plt
for i in range (0, 35, 5):
hist, bins = np.histogram(train_data_raw[i], bins=20)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.title(train_labels[i])
plt.bar(center, hist, align='center', width=width)
plt.show()
# Raw sequence labeling with KnnDtwClassifier and KNN=1
clf1 = KnnDtwClassifier(1)
clf1.fit(train_data_raw, train_labels)
for index, t in enumerate(test_data_raw):
print("KnnDtwClassifier prediction for " +
str(test_labels[index]) + " = " + str(clf1.predict(t)))
# Let's do an extended prediction to get the distances to 3 nearest neighbors
clf2 = KnnDtwClassifier(3)
clf2.fit(train_data_raw, train_labels)
def classify2():
for index, t in enumerate(test_data_raw):
res = clf2.predict_ext(t)
nghs = np.array(train_labels)[res[1]]
print("KnnDtwClassifier neighbors for " + str(test_labels[index]) + " = " + str(nghs))
print("KnnDtwClassifier distances to " + str(nghs) + " = " + str(res[0]))
%time classify2()
# Let's use CommonClassifier with the histogram data for faster prediction
clf3 = CommonClassifier(default=True)
clf3.fit(train_data_hist, train_labels)
clf3.print_fit_summary()
print("\n")
def classify3():
for index, t in enumerate(test_data_hist):
print("CommonClassifier prediction for " + str(test_labels[index]) + " = "
+ str(clf3.predict(t)))
%time classify3()
class KnnDtwClassifier(BaseEstimator, ClassifierMixin):
"""
Custom classifier implementation for Scikit-Learn using Dynamic Time Warping (DTW)
and KNN (K-Nearest Neighbors) algorithms.
This classifier can be used for labeling the varying-length sequences, like time series
or motion data.
FastDTW library is used for faster DTW calculations - linear instead of quadratic complexity.
"""
def __init__(self, n_neighbors=1):
self.n_neighbors = n_neighbors
self.features = []
self.labels = []
def get_distance(self, x, y):
return fastdtw(x, y)[0]
def fit(self, X, y=None):
for index, l in enumerate(y):
self.features.append(X[index])
self.labels.append(l)
return self
def predict(self, X):
dist = np.array([self.get_distance(X, seq) for seq in self.features])
indices = dist.argsort()[:self.n_neighbors]
return np.array(self.labels)[indices]
def predict_ext(self, X):
dist = np.array([self.get_distance(X, seq) for seq in self.features])
indices = dist.argsort()[:self.n_neighbors]
return (dist[indices],
indices)
class CommonClassifier(object):
"""
Helper class to execute the common classification workflow - from training to prediction
to metrics reporting with the popular ML algorithms, like SVM or Random Forest.
Includes the default list of estimators with instances and parameters, which have been
proven to work well.
"""
def __init__(self, default=True, cv=5, reduce_func=None):
self.cv = cv
self.default = default
self.reduce_func = reduce_func
self.reducer = None
self.grid_search = None
def add_estimator(self, name, instance, params):
self.grid_search.add_estimator(name, instance, params)
def fit(self, X, y=None):
if self.default:
self.grid_search = GridSearchEstimatorSelector(X, y, self.cv)
self.grid_search.add_estimator('SVC', SVC(), {'kernel': ["linear", "rbf"],
'C': [1, 5, 10, 50],
'gamma': [0.0, 0.001, 0.0001]})
self.grid_search.add_estimator('RandomForestClassifier', RandomForestClassifier(),
{'n_estimators': [5, 10, 20, 50]})
self.grid_search.add_estimator('ExtraTreeClassifier', ExtraTreesClassifier(),
{'n_estimators': [5, 10, 20, 50]})
self.grid_search.add_estimator('LogisticRegression', LogisticRegression(),
{'C': [1, 5, 10, 50], 'solver': ["lbfgs", "liblinear"]})
self.grid_search.add_estimator('SGDClassifier', SGDClassifier(),
{'n_iter': [5, 10, 20, 50], 'alpha': [0.0001, 0.001],
'loss': ["hinge", "modified_huber",
"huber", "squared_hinge", "perceptron"]})
if self.reduce_func is not None:
self.reducer = FeatureReducer(X, y, self.reduce_func)
self.reducer.reduce(10)
return self.grid_search.select_estimator()
def print_fit_summary(self):
return self.grid_search.print_summary()
def predict(self, X):
if self.grid_search.selected_name is not None:
if self.reduce_func is not None and len(self.reducer.dropped_cols) > 0:
X.drop(self.reducer.dropped_cols, axis=1, inplace=True)
return self.grid_search.best_estimator.predict(X)
else:
return None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since finding natural neighbors already calculates circumcenters and circumradii, return that information as well so it can be reused later.
Step2: We can then use the information in tri_info later.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.gridding.triangles import find_natural_neighbors
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig = plt.figure(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
plt.plot(x, y)
plt.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
plt.plot(x, y, 'k.', markersize=6)
plt.annotate('test ' + str(i), xy=(x, y))
neighbors, tri_info = find_natural_neighbors(tri, test_points)
print(neighbors)
fig = plt.figure(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
plt.plot(x, y)
plt.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenter and radius information from tri_info, plot circumcircles and
# circumcenters for each triangle.
for _idx, item in tri_info.items():
plt.plot(item['cc'][0], item['cc'][1], 'k.', markersize=5)
circ = plt.Circle(item['cc'], item['r'], edgecolor='k', facecolor='none',
transform=fig.axes[0].transData)
fig.axes[0].add_artist(circ)
plt.axes().set_aspect('equal', 'datalim')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CUDA
Step2: Global params
Step3: Load a CSV file for Binary classification (numpy)
Step4: Feature enrichment
Step5: Train / Validation / Test Split
Step6: Correlated columns
Step7: Create PyTorch GPU tensors from numpy arrays
Step8: The NN model
Step9: The cross-entropy loss function
Step10: Training in batches + Measuring the performance of the deep learning model
Step11: Visualize Loss Graph using Visdom
Step12: Performance of the deep learning model on the Validation set
Step13: Submission on Test set
Step14: Create a CSV with the IDs and the corresponding probabilities.
|
<ASSISTANT_TASK:>
Python Code:
import torch
import sys
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from sklearn import cross_validation
from sklearn import metrics
from sklearn.metrics import roc_auc_score, log_loss, roc_auc_score, roc_curve, auc
from sklearn.cross_validation import StratifiedKFold, ShuffleSplit, cross_val_score, train_test_split
print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION')
from subprocess import call
# call(["nvcc", "--version"]) does not work
! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
# call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())
print ('Available devices ', torch.cuda.device_count())
print ('Current cuda device ', torch.cuda.current_device())
import numpy
import numpy as np
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
Tensor = FloatTensor
import pandas
import pandas as pd
import logging
handler=logging.basicConfig(level=logging.INFO)
lgr = logging.getLogger(__name__)
%matplotlib inline
# !pip install psutil
import psutil
import os
def cpuStats():
print(sys.version)
print(psutil.cpu_percent())
print(psutil.virtual_memory()) # physical memory usage
pid = os.getpid()
py = psutil.Process(pid)
memoryUse = py.memory_info()[0] / 2. ** 30 # memory use in GB...I think
print('memory GB:', memoryUse)
cpuStats()
# %%timeit
use_cuda = torch.cuda.is_available()
# use_cuda = False
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
Tensor = FloatTensor
lgr.info("USE CUDA=" + str (use_cuda))
torch.backends.cudnn.benchmark = True
# ! watch -n 0.1 'ps f -o user,pgrp,pid,pcpu,pmem,start,time,command -p `lsof -n -w -t /dev/nvidia*`'
# sudo apt-get install dstat #install dstat
# sudo pip install nvidia-ml-py #install Python NVIDIA Management Library
# wget https://raw.githubusercontent.com/datumbox/dstat/master/plugins/dstat_nvidia_gpu.py
# sudo mv dstat_nvidia_gpu.py /usr/share/dstat/ #move file to the plugins directory of dstat
# Data params
TARGET_VAR= 'target'
TOURNAMENT_DATA_CSV = 'numerai_tournament_data.csv'
TRAINING_DATA_CSV = 'numerai_training_data.csv'
BASE_FOLDER = 'numerai/'
# fix seed
seed=17*19
np.random.seed(seed)
torch.manual_seed(seed)
if use_cuda:
torch.cuda.manual_seed(seed)
# %%timeit
df_train = pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV)
df_train.head(5)
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from collections import defaultdict
# def genBasicFeatures(inDF):
# print('Generating basic features ...')
# df_copy=inDF.copy(deep=True)
# magicNumber=21
# feature_cols = list(inDF.columns)
# inDF['x_mean'] = np.mean(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_median'] = np.median(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_std'] = np.std(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_skew'] = scipy.stats.skew(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_kurt'] = scipy.stats.kurtosis(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_var'] = np.var(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_max'] = np.max(df_copy.ix[:, 0:magicNumber], axis=1)
# inDF['x_min'] = np.min(df_copy.ix[:, 0:magicNumber], axis=1)
# return inDF
def addPolyFeatures(inDF, deg=2):
print('Generating poly features ...')
df_copy=inDF.copy(deep=True)
poly=PolynomialFeatures(degree=deg)
p_testX = poly.fit(df_copy)
# AttributeError: 'PolynomialFeatures' object has no attribute 'get_feature_names'
target_feature_names = ['x'.join(['{}^{}'.format(pair[0],pair[1]) for pair in tuple if pair[1]!=0]) for tuple in [zip(df_copy.columns,p) for p in poly.powers_]]
df_copy = pd.DataFrame(p_testX.transform(df_copy),columns=target_feature_names)
return df_copy
def oneHOT(inDF):
d = defaultdict(LabelEncoder)
X_df=inDF.copy(deep=True)
# Encoding the variable
X_df = X_df.apply(lambda x: d['era'].fit_transform(x))
return X_df
from sklearn import preprocessing
# Train, Validation, Test Split
def loadDataSplit():
df_train = pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV)
# TOURNAMENT_DATA_CSV has both validation and test data provided by NumerAI
df_test_valid = pd.read_csv(BASE_FOLDER + TOURNAMENT_DATA_CSV)
answers_1_SINGLE = df_train[TARGET_VAR]
df_train.drop(TARGET_VAR, axis=1,inplace=True)
df_train.drop('id', axis=1,inplace=True)
df_train.drop('era', axis=1,inplace=True)
df_train.drop('data_type', axis=1,inplace=True)
# df_train=oneHOT(df_train)
df_train.to_csv(BASE_FOLDER + TRAINING_DATA_CSV + 'clean.csv', header=False, index = False)
df_train= pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV + 'clean.csv', header=None, dtype=np.float32)
df_train = pd.concat([df_train, answers_1_SINGLE], axis=1)
feature_cols = list(df_train.columns[:-1])
# print (feature_cols)
target_col = df_train.columns[-1]
trainX, trainY = df_train[feature_cols], df_train[target_col]
# TOURNAMENT_DATA_CSV has both validation and test data provided by NumerAI
# Validation set
df_validation_set=df_test_valid.loc[df_test_valid['data_type'] == 'validation']
df_validation_set=df_validation_set.copy(deep=True)
answers_1_SINGLE_validation = df_validation_set[TARGET_VAR]
df_validation_set.drop(TARGET_VAR, axis=1,inplace=True)
df_validation_set.drop('id', axis=1,inplace=True)
df_validation_set.drop('era', axis=1,inplace=True)
df_validation_set.drop('data_type', axis=1,inplace=True)
# df_validation_set=oneHOT(df_validation_set)
df_validation_set.to_csv(BASE_FOLDER + TRAINING_DATA_CSV + '-validation-clean.csv', header=False, index = False)
df_validation_set= pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV + '-validation-clean.csv', header=None, dtype=np.float32)
df_validation_set = pd.concat([df_validation_set, answers_1_SINGLE_validation], axis=1)
feature_cols = list(df_validation_set.columns[:-1])
target_col = df_validation_set.columns[-1]
valX, valY = df_validation_set[feature_cols], df_validation_set[target_col]
# Test set for submission (not labeled)
df_test_set = pd.read_csv(BASE_FOLDER + TOURNAMENT_DATA_CSV)
# df_test_set=df_test_set.loc[df_test_valid['data_type'] == 'live']
df_test_set=df_test_set.copy(deep=True)
df_test_set.drop(TARGET_VAR, axis=1,inplace=True)
tid_1_SINGLE = df_test_set['id']
df_test_set.drop('id', axis=1,inplace=True)
df_test_set.drop('era', axis=1,inplace=True)
df_test_set.drop('data_type', axis=1,inplace=True)
# df_test_set=oneHOT(df_validation_set)
feature_cols = list(df_test_set.columns) # must be run here, we dont want the ID
# print (feature_cols)
df_test_set = pd.concat([tid_1_SINGLE, df_test_set], axis=1)
testX = df_test_set[feature_cols].values
return trainX, trainY, valX, valY, testX, df_test_set
# %%timeit
trainX, trainY, valX, valY, testX, df_test_set = loadDataSplit()
min_max_scaler = preprocessing.MinMaxScaler()
# # Number of features for the input layer
N_FEATURES=trainX.shape[1]
print (trainX.shape)
print (trainY.shape)
print (valX.shape)
print (valY.shape)
print (testX.shape)
print (df_test_set.shape)
# print (trainX)
# seperate out the Categorical and Numerical features
import seaborn as sns
numerical_feature=trainX.dtypes[trainX.dtypes!= 'object'].index
categorical_feature=trainX.dtypes[trainX.dtypes== 'object'].index
print ("There are {} numeric and {} categorical columns in train data".format(numerical_feature.shape[0],categorical_feature.shape[0]))
corr=trainX[numerical_feature].corr()
sns.heatmap(corr)
from pandas import *
import numpy as np
from scipy.stats.stats import pearsonr
import itertools
# from https://stackoverflow.com/questions/17778394/list-highest-correlation-pairs-from-a-large-correlation-matrix-in-pandas
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, n=5):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
return au_corr[0:n]
print("Top Absolute Correlations")
print(get_top_abs_correlations(trainX, 5))
# Convert the np arrays into the correct dimention and type
# Note that BCEloss requires Float in X as well as in y
def XnumpyToTensor(x_data_np):
x_data_np = np.array(x_data_np, dtype=np.float32)
print(x_data_np.shape)
print(type(x_data_np))
if use_cuda:
lgr.info ("Using the GPU")
X_tensor = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
lgr.info ("Using the CPU")
X_tensor = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch
print(type(X_tensor.data)) # should be 'torch.cuda.FloatTensor'
print(x_data_np.shape)
print(type(x_data_np))
return X_tensor
# Convert the np arrays into the correct dimention and type
# Note that BCEloss requires Float in X as well as in y
def YnumpyToTensor(y_data_np):
y_data_np=y_data_np.reshape((y_data_np.shape[0],1)) # Must be reshaped for PyTorch!
print(y_data_np.shape)
print(type(y_data_np))
if use_cuda:
lgr.info ("Using the GPU")
# Y = Variable(torch.from_numpy(y_data_np).type(torch.LongTensor).cuda())
Y_tensor = Variable(torch.from_numpy(y_data_np)).type(torch.FloatTensor).cuda() # BCEloss requires Float
else:
lgr.info ("Using the CPU")
# Y = Variable(torch.squeeze (torch.from_numpy(y_data_np).type(torch.LongTensor))) #
Y_tensor = Variable(torch.from_numpy(y_data_np)).type(torch.FloatTensor) # BCEloss requires Float
print(type(Y_tensor.data)) # should be 'torch.cuda.FloatTensor'
print(y_data_np.shape)
print(type(y_data_np))
return Y_tensor
# p is the probability of being dropped in PyTorch
# NN params
DROPOUT_PROB = 0.65
LR = 0.005
MOMENTUM= 0.9
dropout = torch.nn.Dropout(p=1 - (DROPOUT_PROB))
sigmoid = torch.nn.Sigmoid()
tanh=torch.nn.Tanh()
relu=torch.nn.LeakyReLU()
lgr.info(dropout)
hiddenLayer1Size=256
hiddenLayer2Size=int(hiddenLayer1Size/2)
hiddenLayer3Size=int(hiddenLayer1Size/2)
hiddenLayer4Size=int(hiddenLayer1Size/2)
linear1=torch.nn.Linear(N_FEATURES, hiddenLayer1Size, bias=True)
torch.nn.init.xavier_uniform(linear1.weight)
linear2=torch.nn.Linear(hiddenLayer1Size, hiddenLayer2Size)
torch.nn.init.xavier_uniform(linear2.weight)
linear3=torch.nn.Linear(hiddenLayer2Size,1)
torch.nn.init.xavier_uniform(linear3.weight)
net = torch.nn.Sequential(linear1,nn.BatchNorm1d(hiddenLayer1Size),dropout,relu,
linear2,nn.BatchNorm1d(hiddenLayer2Size),dropout,relu,
linear3,dropout,sigmoid,
)
lgr.info(net) # net architecture
# ! pip install sympy
import sympy as sp
sp.interactive.printing.init_printing(use_latex=True)
from IPython.display import display, Math, Latex
maths = lambda s: display(Math(s))
latex = lambda s: display(Latex(s))
#the loss function is as follows:
maths("\mathbf{Loss Function:} J(x, z) = -\sum_k^d[x_k \log z_k + (1-x_k)log(1-z_k)]")
# optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
# optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# optimizer = optim.SGD(net.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=5e-3)
#L2 regularization can easily be added to the entire model via the optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=LR,weight_decay=5e-2) # L2 regularization
loss_func=torch.nn.BCELoss() # Binary cross entropy: http://pytorch.org/docs/nn.html#bceloss
# http://andersonjo.github.io/artificial-intelligence/2017/01/07/Cost-Functions/
if use_cuda:
lgr.info ("Using the GPU")
net.cuda()
loss_func.cuda()
# cudnn.benchmark = True
lgr.info (optimizer)
lgr.info (loss_func)
import time
start_time = time.time()
epochs=160 # change to 400 for better results
div_factor=20
all_losses = []
loss_arr =[]
X_tensor_train= XnumpyToTensor(trainX)
Y_tensor_train= YnumpyToTensor(trainY)
print(type(X_tensor_train.data), type(Y_tensor_train.data)) # should be 'torch.cuda.FloatTensor'
# CUDNN_STATUS_NOT_SUPPORTED. This error may appear if you passed in a non-contiguous input.
# X_tensor_train=X_tensor_train.contiguous()
# Y_tensor_train=Y_tensor_train.contiguous()
# dataset = TensorDataset(data_tensor = X_tensor_train,target_tensor = Y_tensor_train)
# loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
# From here onwards, we must only use PyTorch Tensors
for step in range(epochs):
out = net(X_tensor_train) # input x and predict based on x
cost = loss_func(out, Y_tensor_train) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
cost.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if step % div_factor == 0:
loss = cost.data[0]
all_losses.append(loss)
print(step, cost.data.cpu().numpy())
# RuntimeError: can't convert CUDA tensor to numpy (it doesn't support GPU arrays).
# Use .cpu() to move the tensor to host memory first.
prediction = (net(X_tensor_train).data).float() # probabilities
# prediction = (net(X_tensor).data > 0.5).float() # zero or one
# print ("Pred:" + str (prediction)) # Pred:Variable containing: 0 or 1
# pred_y = prediction.data.numpy().squeeze()
pred_y = prediction.cpu().numpy().squeeze()
target_y = Y_tensor_train.cpu().data.numpy()
tu = (log_loss(target_y, pred_y),roc_auc_score(target_y,pred_y ))
print ('LOG_LOSS={}, ROC_AUC={} '.format(*tu))
loss_arr.append(cost.cpu().data.numpy()[0])
end_time = time.time()
print ('{} {:6.3f} seconds'.format('GPU:', end_time-start_time))
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(all_losses)
plt.show()
false_positive_rate, true_positive_rate, thresholds = roc_curve(target_y,pred_y)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.title('LOG_LOSS=' + str(log_loss(target_y, pred_y)))
plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.6f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.1, 1.2])
plt.ylim([-0.1, 1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# ! pip install visdom
from visdom import Visdom
viz = Visdom()
num_epoch=int(epochs/div_factor)
x = np.reshape([i for i in range(num_epoch)],newshape=[num_epoch,1])
loss_data = np.reshape(loss_arr,newshape=[num_epoch,1])
win3=viz.line(
X = x,
Y = loss_data,
opts=dict(
xtickmin=0,
xtickmax=num_epoch,
xtickstep=1,
ytickmin=0,
ytickmax=20,
ytickstep=1,
markercolor=np.random.randint(0, 255, num_epoch),
),
)
net.eval()
# Validation data
print (valX.shape)
print (valY.shape)
X_tensor_val= XnumpyToTensor(valX)
Y_tensor_val= YnumpyToTensor(valY)
print(type(X_tensor_val.data), type(Y_tensor_val.data)) # should be 'torch.cuda.FloatTensor'
predicted_val = (net(X_tensor_val).data).float() # probabilities
# predicted_val = (net(X_tensor_val).data > 0.5).float() # zero or one
pred_y = predicted_val.cpu().numpy()
target_y = Y_tensor_val.cpu().data.numpy()
print (type(pred_y))
print (type(target_y))
tu = (log_loss(target_y, pred_y),roc_auc_score(target_y,pred_y ))
print ('\n')
print ('log_loss={} roc_auc={} '.format(*tu))
false_positive_rate, true_positive_rate, thresholds = roc_curve(target_y,pred_y)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.title('LOG_LOSS=' + str(log_loss(target_y, pred_y)))
plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.6f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.1, 1.2])
plt.ylim([-0.1, 1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# print (pred_y)
# testX, df_test_set
# df[df.columns.difference(['b'])]
# trainX, trainY, valX, valY, testX, df_test_set = loadDataSplit()
print (df_test_set.shape)
columns = ['id', 'probability']
df_pred=pd.DataFrame(data=np.zeros((0,len(columns))), columns=columns)
# df_pred.id.astype(int)
for index, row in df_test_set.iterrows():
rwo_no_id=row.drop('id')
# print (rwo_no_id.values)
x_data_np = np.array(rwo_no_id.values, dtype=np.float32)
if use_cuda:
X_tensor_test = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor_test = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch
X_tensor_test=X_tensor_test.view(1, trainX.shape[1]) # does not work with 1d tensors
predicted_val = (net(X_tensor_test).data).float() # probabilities
p_test = predicted_val.cpu().numpy().item() # otherwise we get an array, we need a single float
df_pred = df_pred.append({'id':row['id'], 'probability':p_test},ignore_index=True)
# df_pred = df_pred.append({'id':row['id'].astype(int), 'probability':p_test},ignore_index=True)
df_pred.head(5)
# df_pred.id=df_pred.id.astype(int)
def savePred(df_pred, loss):
# csv_path = 'pred/p_{}_{}_{}.csv'.format(loss, name, (str(time.time())))
csv_path = 'pred/pred_{}_{}.csv'.format(loss, (str(time.time())))
df_pred.to_csv(csv_path, columns=('id', 'probability'), index=None)
print (csv_path)
savePred (df_pred, log_loss(target_y, pred_y))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The limit as $h$ approaches zero, if it exists, should represent the slope of the tangent line to $(x, f(x))$.
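In symbols (restating the standard definition): $f'(x) = \lim_{h \to 0} \frac{f(x+h) - f(x)}{h}$; the code below approximates this limit with a small but non-zero $h$.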
Step2: It can be shown that the "centered difference formula" is better when computing numerical derivatives
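Concretely, $f'(x) \approx \frac{f(x+h) - f(x-h)}{2h}$. A minimal one-dimensional sketch (the per-component version used later, fin_dif_partial_centered, applies the same idea to each coordinate):
def fin_dif_centered(x, f, h=1e-6):
    # centered finite difference approximation of f'(x)
    return (f(x + h) - f(x - h)) / (2.0 * h)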
Step3: There are two problems with numerical derivatives
Step4: Second approach
Step5: To fix this, we multiply the gradient by a step size. This step size (often called alpha) has to be chosen carefully, as a value too small will result in a long computation time, while a value too large will not give you the right result (by overshooting) or even fail to converge.
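Each iteration therefore applies the update $x_{new} = x_{old} - \alpha \, f'(x_{old})$. A minimal sketch of a single step, where alpha and the starting point are assumed values and f_derivative matches the derivative of $x^2 - 6x + 5$ used below:
def f_derivative(x):
    return 2 * x - 6              # derivative of x**2 - 6x + 5
alpha = 0.01                      # assumed step size
x_old = 15.0                      # assumed starting point
x_new = x_old - alpha * f_derivative(x_old)   # one gradient descent update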
Step6: An important feature of gradient descent is that there should be a visible improvement over time
Step7: From derivatives to gradient
Step8: The function we have evaluated, $f({\mathbf x}) = x_1^2+x_2^2+x_3^2$, is $3$ at $(1,1,1)$ and the gradient vector at this point is $(2,2,2)$.
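This follows directly from the partial derivatives: $\nabla f(\mathbf{x}) = \left(\frac{\partial f}{\partial x_1}, \frac{\partial f}{\partial x_2}, \frac{\partial f}{\partial x_3}\right) = (2x_1, 2x_2, 2x_3)$, which at $(1,1,1)$ gives $(2,2,2)$, while $f(1,1,1) = 3$.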
Step9: Let's start by choosing a random vector and then walking a step in the opposite direction of the gradient vector. We will stop when the difference between the new solution and the old solution is less than a tolerance value.
Step10: Alpha
Step11: Learning from data
Step12: Stochastic Gradient Descent
Step13: Exercise
Step14: Complete the following code in order to fit the line with SGD, track the target function value at each iteration, and visualize both the fitted regression line and the evolution of the cost.
Step15: Mini-batch Gradient Descent
Step16: Loss Functions
|
<ASSISTANT_TASK:>
Python Code:
# numerical derivative at a point x
def f(x):
return x**2
def fin_dif(x,
f,
h = 0.00001):
'''
This method returns the derivative of f at x
by using the finite difference method
'''
return (f(x+h) - f(x))/h
x = 2.0
print "{:2.4f}".format(fin_dif(x,f))
for h in np.linspace(0.0, 1.0 , 5):
print "{:3.6f}".format(f(5+h)), "{:3.6f}".format(f(5)+h*fin_dif(5,f))
x = np.linspace(-1.5,-0.5, 100)
f = [i**2 for i in x]
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,f, 'r-')
plt.plot([-1.5, -0.5], [2, 0.0], 'k-', lw=2)
plt.plot([-1.4, -1.0], [1.96, 1.0], 'b-', lw=2)
plt.plot([-1],[1],'o')
plt.plot([-1.4],[1.96],'o')
plt.text(-1.0, 1.2, r'$x,f(x)$')
plt.text(-1.4, 2.2, r'$(x-h),f(x-h)$')
plt.gcf().set_size_inches((10,3))
plt.grid(True)
plt.show
x = np.linspace(-15,15,100)
y = x**2
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-')
plt.plot([0],[0],'o')
plt.ylim([-10,250])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
ax.text(0,
20,
'Minimum',
ha='center',
color=sns.xkcd_rgb['pale red'],
)
plt.show
x = np.linspace(-15,15,100)
y = -x**2
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-')
plt.plot([0],[0],'o')
plt.ylim([-250,10])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
ax.text(0,
-30,
'Maximum',
ha='center',
color=sns.xkcd_rgb['pale red'],
)
plt.show
x = np.linspace(-15,15,100)
y = x**3
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-')
plt.plot([0],[0],'o')
plt.ylim([-3000,3000])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
ax.text(0,
400,
'Saddle Point',
ha='center',
color=sns.xkcd_rgb['pale red'],
)
plt.show
x = np.linspace(-10,20,100)
y = x**2 - 6*x + 5
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-')
plt.plot([3],[3**2 - 6*3 + 5],'o')
plt.ylim([-10,250])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
ax.text(3,
10,
'Min: x = 3',
ha='center',
color=sns.xkcd_rgb['pale red'],
)
plt.show
x = np.linspace(-10,20,100)
y = x**2 - 6*x + 5
start = 15
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-')
plt.plot([start],[start**2 - 6*start + 5],'o')
ax.text(start,
start**2 - 6*start + 35,
'Start',
ha='center',
color=sns.xkcd_rgb['blue'],
)
d = 2 * start - 6
end = start - d
plt.plot([end],[end**2 - 6*end + 5],'o')
plt.ylim([-10,250])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
ax.text(end,
start**2 - 6*start + 35,
'End',
ha='center',
color=sns.xkcd_rgb['green'],
)
plt.show
old_min = 0
temp_min = 15
step_size = 0.01
precision = 0.0001
def f(x):
return x**2 - 6*x + 5
def f_derivative(x):
import math
return 2*x -6
mins = []
cost = []
while abs(temp_min - old_min) > precision:
old_min = temp_min
gradient = f_derivative(old_min)
move = gradient * step_size
temp_min = old_min - move
cost.append((3-temp_min)**2)
mins.append(temp_min)
# rounding the result to 2 digits because of the step size
print "Local minimum occurs at {:3.6f}.".format(round(temp_min,2))
x = np.linspace(-10,20,100)
y = x**2 - 6*x + 5
x, y = (zip(*enumerate(cost)))
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-', alpha=0.7)
plt.ylim([-10,150])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
plt.show
x = np.linspace(-10,20,100)
y = x**2 - 6*x + 5
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x,y, 'r-')
plt.ylim([-10,250])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
plt.plot(mins,cost,'o', alpha=0.3)
ax.text(start,
start**2 - 6*start + 25,
'Start',
ha='center',
color=sns.xkcd_rgb['blue'],
)
ax.text(mins[-1],
cost[-1]+20,
'End (%s steps)' % len(mins),
ha='center',
color=sns.xkcd_rgb['blue'],
)
plt.show
def f(x):
return sum(x_i**2 for x_i in x)
def fin_dif_partial_centered(x,
f,
i,
h=1e-6):
'''
This method returns the partial derivative of the i-th component of f at x
by using the centered finite difference method
'''
w1 = [x_j + (h if j==i else 0) for j, x_j in enumerate(x)]
w2 = [x_j - (h if j==i else 0) for j, x_j in enumerate(x)]
return (f(w1) - f(w2))/(2*h)
def fin_dif_partial_old(x,
f,
i,
h=1e-6):
'''
This method returns the partial derivative of the i-th component of f at x
by using the (non-centered) finite difference method
'''
w1 = [x_j + (h if j==i else 0) for j, x_j in enumerate(x)]
return (f(w1) - f(x))/h
def gradient_centered(x,
f,
h=1e-6):
'''
This method returns the gradient vector of f at x
by using the centered finite difference method
'''
return[round(fin_dif_partial_centered(x,f,i,h), 10) for i,_ in enumerate(x)]
def gradient_old(x,
f,
h=1e-6):
'''
This method returns the the gradient vector of f at x
by using the (non-centered)ç finite difference method
'''
return[round(fin_dif_partial_old(x,f,i,h), 10) for i,_ in enumerate(x)]
x = [1.0,1.0,1.0]
print '{:.6f}'.format(f(x)), gradient_centered(x,f)
print '{:.6f}'.format(f(x)), gradient_old(x,f)
def euc_dist(v1,v2):
import numpy as np
import math
v = np.array(v1)-np.array(v2)
return math.sqrt(sum(v_i ** 2 for v_i in v))
# choosing a random vector
import random
import numpy as np
x = [random.randint(-10,10) for i in range(3)]
x
def step(x,
grad,
alpha):
'''
This function makes a step in the opposite direction of the gradient vector
in order to compute a new value for the target function.
'''
return [x_i - alpha * grad_i for x_i, grad_i in zip(x,grad)]
tol = 1e-15
alpha = 0.01
while True:
grad = gradient_centered(x,f)
next_x = step(x,grad,alpha)
if euc_dist(next_x,x) < tol:
break
x = next_x
print [round(i,10) for i in x]
step_size = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
import numpy as np
import random
# f = 2x
x = np.arange(10)
y = np.array([2*i for i in x])
# f_target = 1/n Sum (y - wx)**2
def target_f(x,y,w):
return np.sum((y - x * w)**2.0) / x.size
# gradient_f = 2/n Sum 2wx**2 - 2xy
def gradient_f(x,y,w):
return 2 * np.sum(2*w*(x**2) - 2*x*y) / x.size
def step(w,grad,alpha):
return w - alpha * grad
def BGD_multi_step(target_f,
gradient_f,
x,
y,
toler = 1e-6):
'''
Batch gradient descend by using a multi-step approach
'''
alphas = [100, 10, 1, 0.1, 0.001, 0.00001]
w = random.random()
val = target_f(x,y,w)
i = 0
while True:
i += 1
gradient = gradient_f(x,y,w)
next_ws = [step(w, gradient, alpha) for alpha in alphas]
next_vals = [target_f(x,y,w) for w in next_ws]
min_val = min(next_vals)
next_w = next_ws[next_vals.index(min_val)]
next_val = target_f(x,y,next_w)
if (abs(val - next_val) < toler):
return w
else:
w, val = next_w, next_val
print '{:.6f}'.format(BGD_multi_step(target_f, gradient_f, x, y))
%%timeit
BGD_multi_step(target_f, gradient_f, x, y)
def BGD(target_f,
gradient_f,
x,
y,
toler = 1e-6,
alpha=0.01):
'''
Batch gradient descend by using a given step
'''
w = random.random()
val = target_f(x,y,w)
i = 0
while True:
i += 1
gradient = gradient_f(x,y,w)
next_w = step(w, gradient, alpha)
next_val = target_f(x,y,next_w)
if (abs(val - next_val) < toler):
return w
else:
w, val = next_w, next_val
print '{:.6f}'.format(BGD(target_f, gradient_f, x, y))
%%timeit
BGD(target_f, gradient_f, x, y)
import numpy as np
x = np.arange(10)
y = np.array([2*i for i in x])
data = zip(x,y)
for (x_i,y_i) in data:
print '{:3d} {:3d}'.format(x_i,y_i)
print
def in_random_order(data):
'''
Random data generator
'''
import random
indexes = [i for i,_ in enumerate(data)]
random.shuffle(indexes)
for i in indexes:
yield data[i]
for (x_i,y_i) in in_random_order(data):
print '{:3d} {:3d}'.format(x_i,y_i)
import numpy as np
import random
def SGD(target_f,
gradient_f,
x,
y,
toler = 1e-6,
epochs=100,
alpha_0=0.01):
'''
Stochastic gradient descend with automatic step adaptation (by
reducing the step to its 95% when there are iterations with no increase)
'''
data = zip(x,y)
w = random.random()
alpha = alpha_0
min_w, min_val = float('inf'), float('inf')
epoch = 0
iteration_no_increase = 0
while epoch < epochs and iteration_no_increase < 100:
val = target_f(x, y, w)
if min_val - val > toler:
min_w, min_val = w, val
alpha = alpha_0
iteration_no_increase = 0
else:
iteration_no_increase += 1
alpha *= 0.95
for x_i, y_i in in_random_order(data):
gradient_i = gradient_f(x_i, y_i, w)
w = w - (alpha * gradient_i)
epoch += 1
return min_w
print 'w: {:.6f}'.format(SGD(target_f, gradient_f, x, y))
%reset
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_regression
from scipy import stats
import random
%matplotlib inline
# x: input data
# y: noisy output data
x = np.random.uniform(0,1,20)
# f = 2x + 0
def f(x): return 2*x + 0
noise_variance =0.1
noise = np.random.randn(x.shape[0])*noise_variance
y = f(x) + noise
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.xlabel('$x$', fontsize=15)
plt.ylabel('$f(x)$', fontsize=15)
plt.plot(x, y, 'o', label='y')
plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)')
plt.ylim([0,2])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
plt.show()
# Write your target function as f_target 1/n Sum (y - wx)**2
def target_f(x,y,w):
    # your code here (filled in with the same definition used later in this notebook)
    return np.sum((y - x * w)**2.0) / x.size
# Write your gradient function
def gradient_f(x,y,w):
    # your code here (filled in with the same definition used later in this notebook)
    return 2 * np.sum(2*w*(x**2) - 2*x*y) / x.size
def in_random_order(data):
'''
Random data generator
'''
import random
indexes = [i for i,_ in enumerate(data)]
random.shuffle(indexes)
for i in indexes:
yield data[i]
# Modify the SGD function to return a 'target_value' vector
def SGD(target_f,
gradient_f,
x,
y,
toler = 1e-6,
epochs=100,
alpha_0=0.01):
    # Insert your code among the following lines (done here: the target function value is recorded at every epoch and returned together with the best w)
data = zip(x,y)
w = random.random()
alpha = alpha_0
min_w, min_val = float('inf'), float('inf')
iteration_no_increase = 0
    epoch = 0
    target_value = []
while epoch < epochs and iteration_no_increase < 100:
        val = target_f(x, y, w)
        target_value.append(val)
if min_val - val > toler:
min_w, min_val = w, val
alpha = alpha_0
iteration_no_increase = 0
else:
iteration_no_increase += 1
alpha *= 0.95
for x_i, y_i in in_random_order(data):
gradient_i = gradient_f(x_i, y_i, w)
w = w - (alpha * gradient_i)
epoch += 1
    return min_w, np.array(target_value)
# Print the value of the solution
w, target_value = SGD(target_f, gradient_f, x, y)
print 'w: {:.6f}'.format(w)
# Visualize the solution regression line
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(x, y, 'o', label='t')
plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)', alpha=0.5)
plt.plot([0, 1], [0*w, 1*w], 'r-', label='fitted line', alpha=0.5, linestyle='--')
plt.xlabel('input x')
plt.ylabel('target t')
plt.title('input vs. target')
plt.ylim([0,2])
plt.gcf().set_size_inches((10,3))
plt.grid(True)
plt.show()
# Visualize the evolution of the target function value during iterations.
fig, ax = plt.subplots(1, 1)
fig.set_facecolor('#EAEAF2')
plt.plot(np.arange(target_value.size), target_value, 'o', alpha = 0.2)
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.grid()
plt.gcf().set_size_inches((10,3))
plt.grid(True)
plt.show()
def get_batches(iterable,
num_elem_batch = 1):
'''
Generator of batches from an iterable that contains data
'''
current_batch = []
for item in iterable:
current_batch.append(item)
if len(current_batch) == num_elem_batch:
yield current_batch
current_batch = []
if current_batch:
yield current_batch
x = np.array(range(0, 10))
y = np.array(range(10, 20))
data = zip(x,y)
np.random.shuffle(data)
for x in get_batches(data, 3):
print x
print
for batch in get_batches(data, 3):
print np.array(zip(*batch)[0]), np.array(zip(*batch)[1])
%reset
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_regression
from scipy import stats
import random
%matplotlib inline
# x: input data
# y: noisy output data
x = np.random.uniform(0,1,2000)
# f = 2x + 0
def f(x): return 2*x + 0
noise_variance =0.1
noise = np.random.randn(x.shape[0])*noise_variance
y = f(x) + noise
plt.plot(x, y, 'o', label='y')
plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)')
plt.xlabel('$x$', fontsize=15)
plt.ylabel('$t$', fontsize=15)
plt.ylim([0,2])
plt.title('inputs (x) vs targets (y)')
plt.grid()
plt.legend(loc=2)
plt.gcf().set_size_inches((10,3))
plt.show()
# f_target = 1/n Sum (y - wx)**2
def target_f(x,
y,
w):
return np.sum((y - x * w)**2.0) / x.size
# gradient_f = 2/n Sum 2wx**2 - 2xy
def gradient_f(x,
y,
w):
return 2 * np.sum(2*w*(x**2) - 2*x*y) / x.size
def in_random_order(data):
'''
Random data generator
'''
import random
indexes = [i for i,_ in enumerate(data)]
random.shuffle(indexes)
for i in indexes:
yield data[i]
def get_batches(iterable,
num_elem_batch = 1):
'''
Generator of batches from an iterable that contains data
'''
current_batch = []
for item in iterable:
current_batch.append(item)
if len(current_batch) == num_elem_batch:
yield current_batch
current_batch = []
if current_batch:
yield current_batch
def SGD_MB(target_f, gradient_f, x, y, epochs=100, alpha_0=0.01):
data = zip(x,y)
w = random.random()
alpha = alpha_0
min_w, min_val = float('inf'), float('inf')
epoch = 0
while epoch < epochs:
val = target_f(x, y, w)
if val < min_val:
min_w, min_val = w, val
alpha = alpha_0
else:
alpha *= 0.9
np.random.shuffle(data)
for batch in get_batches(data, num_elem_batch = 100):
x_batch = np.array(zip(*batch)[0])
y_batch = np.array(zip(*batch)[1])
gradient = gradient_f(x_batch, y_batch, w)
w = w - (alpha * gradient)
epoch += 1
return min_w
w = SGD_MB(target_f, gradient_f, x, y)
print 'w: {:.6f}'.format(w)
plt.plot(x, y, 'o', label='t')
plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)', alpha=0.5)
plt.plot([0, 1], [0*w, 1*w], 'r-', label='fitted line', alpha=0.5, linestyle='--')
plt.xlabel('input x')
plt.ylabel('target t')
plt.ylim([0,2])
plt.title('input vs. target')
plt.grid()
plt.legend(loc=2)
plt.gcf().set_size_inches((10,3))
plt.show()
%reset
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_regression
from scipy import stats
import random
%matplotlib inline
# the function that I'm going to plot
def f(x,y):
return x**2 + 5*y**2
x = np.arange(-3.0,3.0,0.1)
y = np.arange(-3.0,3.0,0.1)
X,Y = np.meshgrid(x, y, indexing='ij') # grid of point
Z = f(X, Y) # evaluation of the function on the grid
plt.pcolor(X, Y, Z, cmap=plt.cm.gist_earth)
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.gca().set_aspect('equal', adjustable='box')
plt.gcf().set_size_inches((6,6))
plt.show()
def target_f(x):
return x[0]**2.0 + 5*x[1]**2.0
def part_f(x,
f,
i,
h=1e-6):
w1 = [x_j + (h if j==i else 0) for j, x_j in enumerate(x)]
w2 = [x_j - (h if j==i else 0) for j, x_j in enumerate(x)]
return (f(w1) - f(w2))/(2*h)
def gradient_f(x,
f,
h=1e-6):
return np.array([round(part_f(x,f,i,h), 10) for i,_ in enumerate(x)])
def SGD(target_f,
gradient_f,
x,
alpha_0=0.01,
toler = 0.000001):
alpha = alpha_0
min_val = float('inf')
steps = 0
iteration_no_increase = 0
trace = []
while iteration_no_increase < 100:
val = target_f(x)
if min_val - val > toler:
min_val = val
alpha = alpha_0
iteration_no_increase = 0
else:
alpha *= 0.95
iteration_no_increase += 1
trace.append(x)
gradient_i = gradient_f(x, target_f)
x = x - (alpha * gradient_i)
steps += 1
return x, val, steps, trace
x = np.array([2,-2])
x, val, steps, trace = SGD(target_f, gradient_f, x)
print x
print 'Val: {:.6f}, steps: {:.0f}'.format(val, steps)
def SGD_M(target_f,
gradient_f,
x,
alpha_0=0.01,
toler = 0.000001,
m = 0.9):
alpha = alpha_0
min_val = float('inf')
steps = 0
iteration_no_increase = 0
v = 0.0
trace = []
while iteration_no_increase < 100:
val = target_f(x)
if min_val - val > toler:
min_val = val
alpha = alpha_0
iteration_no_increase = 0
else:
alpha *= 0.95
iteration_no_increase += 1
trace.append(x)
gradient_i = gradient_f(x, target_f)
v = m * v + (alpha * gradient_i)
x = x - v
steps += 1
return x, val, steps, trace
x = np.array([2,-2])
x, val, steps, trace2 = SGD_M(target_f, gradient_f, x)
print '\n',x
print 'Val: {:.6f}, steps: {:.0f}'.format(val, steps)
x2 = np.array(range(len(trace)))
x3 = np.array(range(len(trace2)))
plt.xlim([0,len(trace)])
plt.gcf().set_size_inches((10,3))
plt.plot(x3, trace2)
plt.plot(x2, trace, '-')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Set neural network hyperparameters (tidier at top of file!)
Step3: Set number of neurons for each layer
Step4: Define placeholder Tensors for inputs and labels
Step5: Define types of layers
Step6: Define dictionaries for storing weights and biases for each layer -- and initialize
Step7: Design neural network architecture
Step8: Build model
Step9: Define model's loss and its optimizer
Step10: Define evaluation metrics
Step11: Create op for variable initialization
Step12: Train the network in a session
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
lr = 0.1
epochs = 10
batch_size = 128
weight_initializer = tf.contrib.layers.xavier_initializer()
n_input = 784
n_dense_1 = 64
n_dense_2 = 64
n_dense_3 = 64
n_classes = 10
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# dense layer with ReLU activation:
def dense(x, W, b):
z = tf.add(tf.matmul(x, W), b)
a = tf.nn.relu(z)
return a
bias_dict = {
'b1': tf.Variable(tf.zeros([n_dense_1])),
'b2': tf.Variable(tf.zeros([n_dense_2])),
'b3': tf.Variable(tf.zeros([n_dense_3])),
'b_out': tf.Variable(tf.zeros([n_classes]))
}
weight_dict = {
'W1': tf.get_variable('W1', [n_input, n_dense_1], initializer=weight_initializer),
'W2': tf.get_variable('W2', [n_dense_1, n_dense_2], initializer=weight_initializer),
'W3': tf.get_variable('W3', [n_dense_2, n_dense_3], initializer=weight_initializer),
'W_out': tf.get_variable('W_out', [n_dense_3, n_classes], initializer=weight_initializer),
}
def network(x, weights, biases):
# two dense hidden layers:
dense_1 = dense(x, weights['W1'], biases['b1'])
dense_2 = dense(dense_1, weights['W2'], biases['b2'])
dense_3 = dense(dense_2, weights['W3'], biases['b3'])
    # linear output layer (returns raw logits; the softmax is applied inside the loss function):
out_layer_z = tf.add(tf.matmul(dense_3, weights['W_out']), biases['b_out'])
return out_layer_z
predictions = network(x, weights=weight_dict, biases=bias_dict)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(cost)
# calculate accuracy by identifying test cases where the model's highest-probability class matches the true y label:
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
accuracy_pct = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) * 100
initializer_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(initializer_op)
print("Training for", epochs, "epochs.")
# loop over epochs:
for epoch in range(epochs):
avg_cost = 0.0 # track cost to monitor performance during training
avg_accuracy_pct = 0.0
# loop over all batches of the epoch:
n_batches = int(mnist.train.num_examples / batch_size)
for i in range(n_batches):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# feed batch data to run optimization and fetching cost and accuracy:
_, batch_cost, batch_acc = session.run([optimizer, cost, accuracy_pct], feed_dict={x: batch_x, y: batch_y})
# accumulate mean loss and accuracy over epoch:
avg_cost += batch_cost / n_batches
avg_accuracy_pct += batch_acc / n_batches
# output logs at end of each epoch of training:
print("Epoch ", '%03d' % (epoch+1),
": cost = ", '{:.3f}'.format(avg_cost),
", accuracy = ", '{:.2f}'.format(avg_accuracy_pct), "%",
sep='')
print("Training Complete. Testing Model.\n")
test_cost = cost.eval({x: mnist.test.images, y: mnist.test.labels})
test_accuracy_pct = accuracy_pct.eval({x: mnist.test.images, y: mnist.test.labels})
print("Test Cost:", '{:.3f}'.format(test_cost))
print("Test Accuracy: ", '{:.2f}'.format(test_accuracy_pct), "%", sep='')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quantum Double-slit Experiment
Step2: Now define the double_slit function and make it interactive
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy as np
import matplotlib.pyplot as plot
from scipy.integrate import trapz,cumtrapz
from IPython.html.widgets import interact, interactive
def distribute1D(x,prob,N):
    '''
    takes any distribution which is directly proportional
    to the number of particles, and returns data that is
    statistically the same as the input data.
    '''
CDF = cumtrapz(prob)/np.sum(prob)
xsamples = np.zeros(N,float)
for i in range(0,N):
r = np.random.ranf()
xsamples[i] = x[CDF.searchsorted(r)]
return xsamples
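# Added illustrative check (not from the original notebook): draw samples from a simple
# linearly rising distribution p(x) proportional to x and confirm the histogram follows the same shape.
x_check = np.linspace(0., 1., 200)
p_check = x_check
check_samples = distribute1D(x_check, p_check, 5000)
plt.hist(check_samples, bins=40)
plt.xlabel('x')
plt.title('distribute1D sanity check: p(x) $\propto$ x')
plt.show()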
#Quantum double-slit
#define the experimental parameters
#d = 15. # (micron) dist. between slits
#a = 10. # (micron) slit width.
#L = 1. # (m) dist. from slit to screen
#lam = 632.8 # (nm) He-Neon laser
def double_slit(d=15.,a=10.,L=3.,lam=632.8,N=0):
#convert d and a in microns to meters
dm = d*1.e-6
am = a*1.e-6
#convert wavelength from nm to m
wave=lam*1.e-9
# create the probability distribution
x = np.linspace(-0.2,0.2,10000)
#Isingle = np.sin(np.pi*am*x/wave/L)**2./(np.pi*am*x/wave/L)**2
Isingle = np.sinc(am*x/wave/L)**2.
Idouble = (np.cos(2*np.pi*dm*x/wave/L)**2)
Itot = Isingle*Idouble
#generate the random photon locations on the screen
#x according to the intensity distribution
xsamples = distribute1D(x,Itot,N)
#y randomly over the full screen height
ysamples = -0.2 + 0.4*np.random.ranf(N)
#Make subplot of the intensity and the screen distribution
fig = plt.figure(1,(10,6))
plt.subplot(2,1,1)
plt.plot(x,Itot)
plt.xlim(-0.2,0.2)
plt.ylim(0.,1.2)
plt.ylabel("Intensity",fontsize=20)
plt.subplot(2,1,2)
plt.xlim(-0.2,0.2)
plt.ylim(-0.2,0.2)
plt.scatter(xsamples,ysamples)
plt.xlabel("x (m)",fontsize=20)
plt.ylabel("y (m)",fontsize=20)
v5 = interact(double_slit,d=(1.,20.,1.), a=(5,50.,1.), L=(1.0,3.0),
lam=(435.,700.),N=(0,10000))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now given the current, we can use Ohm's law to determine the voltage drop across R1.
Step2: Thus we know that $V_x = 3 V_x + 2$ or $V_x = -1$. Of course, Lcapy can determine this directly. Here Ox is the name of the open circuit over which we wish to determine the voltage difference
Step3: Alternatively, we can query Lcapy for the voltage at node 'x' with respect to ground. This gives the same result.
Step4: Let's check the current with Lcapy
|
<ASSISTANT_TASK:>
Python Code:
Vx = V('V_x').Voc
I = (cct.V1.V - 4 * Vx) / (cct.R1.Z + cct.R2.Z)
I * cct.R1.Z
cct.V1.V - I * cct.R1.Z
cct.Ox.V
cct['x'].V
cct.R1.I
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the NuSTAR TLE archive.
Step2: Here is where we define the observing window that we want to use.
|
<ASSISTANT_TASK:>
Python Code:
fname = io.download_occultation_times(outdir='../data/')
print(fname)
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
tstart = '2021-07-30T18:00:00'
tend = '2021-07-30T23:00:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
for orbit in orbits:
print(orbit[0], orbit[1])
# Get the solar parameter
from sunpy.coordinates import sun
angular_size = sun.angular_radius(t='now')
dx = angular_size.arcsec
print(dx)
sun_pa = planning.get_nustar_roll(tstart, 0.)
pa = planning.get_nustar_roll(tstart, 0*u.deg)
print(tstart)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
# Orbit 1 (Eastern limb)
offset = [-1050, -350.]*u.arcsec
for ind, orbit in enumerate(orbits):
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
test1 = SkyCoord(289.3792274160115, -22.304595055979675, unit = 'deg')
orb1 = SkyCoord(289.3855, -22.3051, unit = 'deg')
orb1.separation(test1)
orbit
import sunpy
sunpy.__version__
test1 = SkyCoord(289.898451566591, -22.158432904027155 , unit = 'deg')
orb1 = SkyCoord(289.9047, -22.1589, unit = 'deg')
orb1.separation(test1)
sun_pa = planning.get_nustar_roll(tstart, 0.)
pa = planning.get_nustar_roll(tstart, 45*u.deg)
offset = [0, 0.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sun_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
# Orbit 1 (AR)
offset = [900, -300.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
planning.make_test_region(sky_pos[0], sky_pos[1], pa, sun_pos[0], sun_pos[1], sun_pa)
print(pa)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here is about the simplest plot command you can get.
Step2: You can also plot y versus x values, as follows
Step3: You can use various modifiers to get different styles of plots.
Step4: A scatter plot is also available
Step5: Numerical plotting.
Step6: Let's get fancy and draw a figure with subplots.
Step7: Scatter plots
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from matplotlib.pyplot import *
plot([1,2,3,4])
plot([1,2,3,4],[1,4,9,16])
plot([1,2,3,4],[1,4,9,16],'or') # 'o' for dots, 'r' for red
scatter([1,2,3,4],[1,4,9,16])
from numpy import *
x = linspace(-2,2)
y = x**3-x
plot(x,y)
x = linspace(-3,3)
fig = figure(figsize=figaspect(0.2))
ax = fig.add_subplot(131)
ax.plot(x,cos(x),color='b')
ax.set_title('Cosine')
ax = fig.add_subplot(132)
ax.plot(x,sin(x),color='r')
ax.set_title('Sine')
ax = fig.add_subplot(133)
ax.plot(x,cos(x),'b',x,sin(x),'r')
ax.set_title('Both')
fig, ax = subplots()
num = 1000
s = 121
x1 = linspace(-0.5,1,num) + (0.5 - random.rand(num))
y1 = linspace(-5,5,num) + (0.5 - random.rand(num))
x2 = linspace(-0.5,1,num) + (0.5 - random.rand(num))
y2 = linspace(5,-5,num) + (0.5 - random.rand(num))
x3 = linspace(-0.5,1,num) + (0.5 - random.rand(num))
y3 = (0.5 - random.rand(num))
ax.scatter(x1, y1, color='r', s=2*s, marker='^', alpha=.4)
ax.scatter(x2, y2, color='b', s=s/2, marker='o', alpha=.4)
ax.scatter(x3, y3, color='g', s=s/3, marker='s', alpha=.4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can now use the triplets of parameters defined above to automate the process of training and evaluating ARIMA models on different combinations. In Statistics and Machine Learning, this process is known as grid search (or hyperparameter optimization) for model selection.
|
<ASSISTANT_TASK:>
Python Code:
# Define the p, d and q parameters to take any value between 0 and 4
p = d = q = range(0, 5)
# Generate all different combinations of p, d and q triplets
pdq = list(itertools.product(p, d, q))
# Generate all different combinations of seasonal p, d and q triplets
seasonal_pdq = [(x[0], x[1], x[2],1) for x in list(itertools.product(p, d, q))]
print('Examples of parameter combinations for Seasonal ARIMA...')
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))
warnings.filterwarnings("ignore") # specify to ignore warning messages
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(ts_log,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
except:
continue
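# Added sketch (not part of the original text): since the goal is model selection, the same
# grid search can also keep track of the parameter combination with the lowest AIC.
best_aic = float('inf')
best_param, best_param_seasonal = None, None
for param in pdq:
    for param_seasonal in seasonal_pdq:
        try:
            mod = sm.tsa.statespace.SARIMAX(ts_log,
                                            order=param,
                                            seasonal_order=param_seasonal,
                                            enforce_stationarity=False,
                                            enforce_invertibility=False)
            results = mod.fit()
            if results.aic < best_aic:
                best_aic = results.aic
                best_param, best_param_seasonal = param, param_seasonal
        except:
            continue
print('Best model: ARIMA{}x{} - AIC:{}'.format(best_param, best_param_seasonal, best_aic))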
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data manipulation
Step2: By sorting the cities by population we immediately see the entries of a few of the largest cities in the world.
Step3: Assignment B
Step4: Assignment C
Step5: Assignment D
Step6: Assignment E
|
<ASSISTANT_TASK:>
Python Code:
import urllib.request as urllib, zipfile, os
url = 'http://download.maxmind.com/download/worldcities/'
filename = 'worldcitiespop.txt.gz'
datafolder = 'data/'
downloaded = urllib.urlopen(url + filename)
buf = downloaded.read()
try:
os.mkdir(datafolder)
except FileExistsError:
pass
with open(datafolder + filename, 'wb') as f:
f.write(buf)
import pandas as pd
cities = pd.read_csv(datafolder + filename, sep=',', low_memory=False, encoding = 'ISO-8859-1')
cities.tail()
#NAN VALUES: AKA NOT A NUMBER
cities.sort_values(by='Population', ascending=False).head()
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
y = list(cities.Latitude)
x = list(cities.Longitude)
plt.scatter(x,y, 1, [0,0,0,1])
dutch_cities = cities[ cities['Country'] =='nl' ]
plt.figure(figsize=[7,7]);
cm = plt.cm.get_cmap('YlOrRd')
y = dutch_cities.Latitude
x = dutch_cities.Longitude
pop = dutch_cities.Population
popsize = pop / 450
plt.xlim(3, 8)
plt.ylim(50.70, 53.6)
sc= plt.scatter(x,y,popsize,c=pop, cmap=cm, vmin=pop.min(), vmax=pop.max())
colobar = plt.colorbar(sc)
Eind = [i for i, j in enumerate(dutch_cities.City) if j == 'eindhoven']
Adam = [n for n, m in enumerate(dutch_cities.City) if m == 'amsterdam']
PopEind = dutch_cities.iloc[Eind].Population/1000
PopAdam = dutch_cities.iloc[Adam].Population/1000
plt.figure();
bars = plt.hist(np.asarray(dutch_cities.dropna().Population/1000), 100, normed=1);
plt.annotate('Eindhoven', xy=(PopEind,0), xytext=(PopEind, 0.005),
arrowprops=dict(facecolor='red', shrink = 0.01),
)
plt.annotate('Amsterdam', xy=(PopAdam,0), xytext=(PopAdam, 0.005),
arrowprops=dict(facecolor='grey', shrink = 0.05),
)
plt.xlabel('Aantal inwoners in duizenden')
plt.ylabel('Proportie steden met zoveel inwoners')
plt.figure(figsize=[20, 8]);
plt.subplot(2,1,1);
plt.title("Dutch City Distribution")
plt.hist(np.asarray(dutch_cities.dropna().Population/1000), bins=np.arange(0, 200, 1), normed=1);
plt.ylim(0.00, 0.10)
plt.subplot(2,1,2);
plt.title("Global City Distribution")
plt.hist(np.asarray(cities.dropna().Population/1000), bins=np.arange(0, 200, 1), normed=1);
## add the subplot of the world cities below this Dutch one
#It seems to me that there seem to be bigger cities in the Netherlands in general, although the Global distribution has the highest population in one city.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll import the VAE and GAN model classes from fauxtograph as well as the dependencies to read the dataset and display images in the notebook.
Step2: Import and Inspect the Data
Step3: We see that there are 56,443 fonts where the individual characters are 64x64 grayscale images. Moreover, each font has an upper-case sequence, a lower-case sequence, and then the digits 0-9 for a total of 62 characters a piece. It would appear that punctuation marks, etc. are left out of this set, but these should be interesting enough.
Step4: Adversarial Networks
Step5: Exploring the VAE Font Space
Step6: Exploring the GAN Font Space
|
<ASSISTANT_TASK:>
Python Code:
# Installs fauxtograph, its dependencies, scipy, and h5py.
!pip install --upgrade fauxtograph; pip install h5py; pip install scipy
# Optionally uncomment the line below and run for GPU capabilities.
# !pip install chainer-cuda-deps
# Optionally uncomment the line below to use wget to download the font dataset
# !wget https://s3.amazonaws.com/erikbern/fonts.hdf5
from fauxtograph import VAE, GAN
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import h5py
import numpy as np
from PIL import Image
import sys
%matplotlib inline
# These are two functions to reduce clutter in the
# notebook that pertain to training a model and plotting
# text in generated fonts. To review this code just look in the
# nb_tools.py file.
from nb_tools import train_vae, train_gan, interpolating_font, show_latent_2d
# Read our font data
fonts = h5py.File('/home/ubuntu/Data/fonts.hdf5','r')['fonts']
# Make a function to inspect what each font looks like.
def font_inspector(font_h5,font_number):
# Make and 8x8 grid with no spacing between plots.
fig = plt.figure(figsize=(8,8))
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0.0, hspace=0.0)
# Put each font character into the grid.
for i, letter in enumerate(255-font_h5[font_number]):
plt.subplot(8,8,i+1)
ax1 = plt.subplot(gs1[i])
ax1.imshow(letter, 'gray')
plt.axis('off')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.set_aspect('equal')
plt.suptitle("Total Data Shape = %s" % str(fonts.shape), fontsize=28)
plt.savefig('font_image.png', transparent=False)
# Inspect a font.
font_inspector(fonts, 0)
vae = VAE(img_width=64, img_height=64,
color_channels=62, latent_width=60,
kl_ratio=0.01, flag_gpu=True)
gan = GAN(img_width=64, img_height=64,
color_channels=1, latent_width=2,
flag_gpu=True)
#Trains the VAE on all leters at once fed in thtrough separate channels.
train_vae(fonts, vae, save_path='./models/vae_model', save_freq=1)
#Trains the gan on a specific font letter.
train_gan(fonts, gan, letter='A', save_path='./models/gan_model', save_freq=1)
# Load your (pre)trained VAE model. Change these to your model paths
# if you want to use your own.
loader = {}
loader['model'] = './pretrained/vae_model_model.h5'
loader['opt'] = './pretrained/vae_model_opt.h5'
loader['meta'] = './pretrained/vae_model_meta.json'
loader['flag_gpu'] = True
vae1 = VAE.load(**loader)
# Load your (pre)trained GAN model. Change these to your model paths
# if you want to use your own.
loader = {}
loader['dec'] = './pretrained/gan_model_dec.h5'
loader['disc'] = './pretrained/gan_model_disc.h5'
loader['dec_opt'] = './pretrained/gan_model_dec_opt.h5'
loader['disc_opt'] = './pretrained/gan_model_disc_opt.h5'
loader['meta'] = './pretrained/gan_model_meta.json'
loader['flag_gpu'] = True
gan1 = GAN.load(**loader)
string = 'The Quick Brown Fox Jumps Over The Lazy Dog 1234567890'
interpolating_font(vae1, string, stride=15)
show_latent_2d(gan1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sometimes mean and variance are not enough to describe a distribution. When we calculate variance, we square the deviations around the mean. In the case of large deviations, we do not know whether they are likely to be positive or negative. This is where the skewness and symmetry of a distribution come in. A distribution is <i>symmetric</i> if the parts on either side of the mean are mirror images of each other. For example, the normal distribution is symmetric. The normal distribution with mean $\mu$ and standard deviation $\sigma$ is defined as
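$$f(x) = \frac{1}{\sigma\sqrt{2\pi}}\, e^{-\frac{(x-\mu)^2}{2\sigma^2}}.$$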
Step2: A distribution which is not symmetric is called <i>skewed</i>. For instance, a distribution can have many small positive and a few large negative values (negatively skewed) or vice versa (positively skewed), and still have a mean of 0. A symmetric distribution has skewness 0. Positively skewed unimodal (one mode) distributions have the property that mean > median > mode. Negatively skewed unimodal distributions are the reverse, with mean < median < mode. All three are equal for a symmetric unimodal distribution.
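For reference, skewness is usually quantified by the standardized third moment, $$S = \frac{E\left[(X-\mu)^3\right]}{\sigma^3},$$ which is what stats.skew estimates from a sample below.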
Step3: Although skew is less obvious when graphing discrete data sets, we can still compute it. For example, below are the skew, mean, and median for S&P 500 returns 2012-2014. Note that the skew is negative, and so the mean is less than the median.
Step4: Kurtosis
Step5: The formula for kurtosis is
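$$K = \frac{E\left[(X-\mu)^4\right]}{\sigma^4},$$ and the excess kurtosis reported below is $K - 3$, so that a normal distribution has excess kurtosis 0.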
Step6: The histogram of the returns shows significant observations beyond 3 standard deviations away from the mean and multiple large spikes, so we shouldn't be surprised that the kurtosis indicates a leptokurtic distribution.
Step7: Great, if properly calibrated we should expect to be wrong $5\%$ of the time at a 0.05 significance level, and this is pretty close. This means that the test is working as we expect.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# Plot a standard normal distribution (mean = 0, standard deviation = 1)
xs = np.linspace(-6,6, 300)
normal = stats.norm.pdf(xs)
plt.plot(xs, normal);
# Generate x-values for which we will plot the distribution
xs2 = np.linspace(stats.lognorm.ppf(0.01, .7, loc=-.1), stats.lognorm.ppf(0.99, .7, loc=-.1), 150)
# Negatively skewed distribution
lognormal = stats.lognorm.pdf(xs2, .7)
plt.plot(xs2, lognormal, label='Skew > 0')
# Positively skewed distribution
plt.plot(xs2, lognormal[::-1], label='Skew < 0')
plt.legend();
start = '2012-01-01'
end = '2015-01-01'
pricing = get_pricing('SPY', fields='price', start_date=start, end_date=end)
returns = pricing.pct_change()[1:]
print 'Skew:', stats.skew(returns)
print 'Mean:', np.mean(returns)
print 'Median:', np.median(returns)
plt.hist(returns, 30);
# Plot some example distributions
plt.plot(xs,stats.laplace.pdf(xs), label='Leptokurtic')
print 'Excess kurtosis of leptokurtic distribution:', (stats.laplace.stats(moments='k'))
plt.plot(xs, normal, label='Mesokurtic (normal)')
print 'Excess kurtosis of mesokurtic distribution:', (stats.norm.stats(moments='k'))
plt.plot(xs,stats.cosine.pdf(xs), label='Platykurtic')
print 'Excess kurtosis of platykurtic distribution:', (stats.cosine.stats(moments='k'))
plt.legend();
print "Excess kurtosis of returns: ", stats.kurtosis(returns)
from statsmodels.stats.stattools import jarque_bera
N = 1000
M = 1000
pvalues = np.ndarray((N))
for i in range(N):
# Draw M samples from a normal distribution
X = np.random.normal(0, 1, M);
_, pvalue, _, _ = jarque_bera(X)
pvalues[i] = pvalue
# count number of pvalues below our default 0.05 cutoff
num_significant = len(pvalues[pvalues < 0.05])
print float(num_significant) / N
_, pvalue, _, _ = jarque_bera(returns)
if pvalue > 0.05:
print 'The returns are likely normal.'
else:
print 'The returns are likely not normal.'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the $\frac{d\Gamma}{d\cos\theta}$ function, using the angle $\theta$ and the muon polarization $P_{\mu}$ as input variables
Step2: We are now ready to plot the $d\Gamma$ probability density function.
Step3: [Mariona]
Step4: 2. Build a Monte Carlo able to generate this PDF
Step5: Using Numpy
Step6: Plot the results of the Try-Reject Method
Step7: Inverse function p.d.f. generation
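Integrating the p.d.f. gives the cumulative distribution used below, $$F(\cos\theta)=\int_{-1}^{\cos\theta}\frac{1}{2}\left(1-\frac{P_\mu t}{3}\right)dt=\frac{1}{2}(\cos\theta+1)-\frac{P_\mu}{12}\left(\cos^2\theta-1\right),$$ and setting $F(\cos\theta)=r$ with $r$ uniform in $[0,1)$ and solving the quadratic for $\cos\theta$ (keeping the root that lies in $[-1,1]$) yields the inverse implemented in Finv: $$\cos\theta=\frac{3-6\sqrt{\frac{1}{4}-\frac{P_\mu}{3}\left(r-\frac{1}{2}-\frac{P_\mu}{12}\right)}}{P_\mu}.$$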
Step8: (without using numpy explicitly)
Step9: (with numpy magic)
Step10: [Oriol] The execution time of the code without using numpy's own methods is 18 seconds, whereas using numpy properly reduces it to 0.45 seconds (results on my computer with many other programs open)
Step11: Estimate distribution parameters
Step12: Part 3
Step13: 3.2. What is the variance of the parameter $P_{\mu}$? Compute it numerically using Monte Carlo techniques for a given $P_{\mu}$ value.
Step14: Calculate the variance of the estimated Pmu both with the estimated mu and the theoretical mu and compare them.
Step15: We define the variance as
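$$s^2=\frac{1}{m-1}\sum_{i=1}^{m}\left(\hat{P}_{\mu,i}-\overline{\hat{P}_{\mu}}\right)^2 \qquad\text{or, using the true value,}\qquad \frac{1}{m}\sum_{i=1}^{m}\left(\hat{P}_{\mu,i}-P_{\mu}\right)^2,$$ where $m$ is the number of independent estimates (these correspond to variance1 and variance2 in the code below).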
Step16: As we can see from the plot, the estimated mean resulting from the first iterations is not very accurate, but as the sample size increases, the estimated mean approaches the theoretical mean --as predicted by the Law of Large Numbers.
Step17: ATTENTION
Step18: Since the random numbers are generated independently, and our MC works with two-dimensional arrays, we can pass in the parameters so that the Monte Carlo itself returns the Nxm matrix from step 2 of the explanation.
Step19: Part 5
Step20: Calculate the moments of the distribution
Step21: For the values to follow a Gaussian distribution, the skewness factor would have to be 0 (skewness measures symmetry, and the Gaussian is fully symmetric), and the kurtosis factor would also have to be 0. Since neither of these parameters is 0, the distribution is not Gaussian.
Step22: Calculate the moments of the distribution
Step23: Both the skewness and kurtosis factors are approximately zero, which is consistent with a Gaussian distribution.
Step24: 6.1. What is the probability density function associated with the number of entries per bin?
Step25: The left and right positions of the bins (from the first to the last one) are contained in the "bin_edges" vector. In the cell above, we chose to study the first bin (bin_number=1), and to calculate the probability that a point falls within it, we have to select bin_edges[1] rather than bin_edges[0] (as choosing bin_edges[0] would yield a probability equal to 0).
Step26: We can use the cumulative function to calculate the probability that a point falls within the bin.
Step27: Plot the binomial distribution on top of the histogram for bin bin_number
Step28: As shown in this exercise, when the number of values in the sample num_sample is fixed, the entries in a given bin follow a binomial distribution. By extension, the joint distribution of the numbers of entries across all bins is multinomial.
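For a single bin with probability $p$ of catching an event and $n=$ num_sample independent trials, this means $$P(k)=\binom{n}{k}\,p^{k}\,(1-p)^{\,n-k}.$$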
Step29: Calculate the moments of the N_62 vector
Step30: Mean and sigma give 0 and 1 respectively since we have rescaled the N_62 vector. The skewness and kurtosis parameters are approximately 0, which is indicative of a Gaussian.
Step31: We will calculate the mean by multiplying the probability that a value falls within a given bin by the sample size. To this end, we will create a vector mean where we will store the $\mu$ of each bin, and use the cumulative function between two bin edges to obtain the probability, Prob_bin_63.
Step32: Compute the chi squared for each MC run.
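The statistic computed below uses the binomial variance of each bin in the denominator, $$\chi^2=\sum_{i=1}^{n_{\mathrm{bins}}}\frac{\left(N_i-\mu_i\right)^2}{\mu_i\,(1-p_i)},\qquad \mu_i=N\,p_i,$$ where $N_i$ is the observed number of entries in bin $i$, $p_i$ its probability and $N$ the sample size.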
Step33: Plot the $\chi^2$ distribution.
Step34: 6.4. How does the $\chi^{2}$ computed above change when you change the number of bins?
Step35: [Oriol] I have separated the definition of $P_{\mu}$ from the rest of the parameters, so if you don't print it you won't know the value of $P_{\mu}$ and won't complain as much. Now you can try to guess $P_{\mu}$ by looking at the histogram with the theoretical pdf plot commented out. I have also moved the bin number definition further down, in case anyone gets bored and wants to see how the result varies for the same sample when using different bins.
|
<ASSISTANT_TASK:>
Python Code:
# sets the plots to be embedded in the notebook
%matplotlib inline
# Import useful python libraries
import numpy as np # library to work with arrays
import matplotlib.pyplot as plt # plotting library (all weird commands starting with plt., ax., fig. are matplotlib
# they are not important, the define plots and set labels, axis...)
import random # (pseudo-)random numbers generation
import scipy.stats as stat # extra statistical functions (the basic are included in numpy)
import scipy.optimize as opt # optimization and root finding package
from scipy.misc import factorial
def gamma(cost,Pmu):
dg=.5*(1-Pmu*cost/3)
return dg
# Define parameters
Pmu=0.3
# Define plot variables
cost=np.linspace(-1,1,201)
dG=gamma(cost,Pmu)
# Plot
fig=plt.figure(1)
ax=fig.add_subplot(111)
ax.plot(cost,dG)
ax.set_title('$d\Gamma$ probability density function')
ax.set_xlabel(r'$\cos\theta$')
ax.set_ylabel('$d\Gamma$')
'''
plt.figure(1)
plt.plot(cost, dG)
plt.title('$d\Gamma$ probability density function')
plt.xlabel(r'$\cos\theta$')
plt.ylabel('$d\Gamma$')
'''
#%%timeit # returns the elapsed time when executing the cell, it executes the cell more than once,
# thus, it should only be uncommented when desired
# Montecarlo try-reject
N=10000000
# Define fmax
# The goal is to have the lowest fmax possible in order to increase the efficiency of the try-reject, knowing
# the shape of the p.d.f. this can be trivially done, because we know its maximum value must be either at 1 or -1
fmax=max(gamma(-1,Pmu),gamma(1,Pmu))
Xi1=np.empty(N) # Declaring an array instead of making it grow inside a loop speeds a lot the computation time
i=0
while i<N:
# 1st step of the try reject, choose a point inside the area [a,b]x[0,fmax]
r=random.random()
x=-1+2*r
y=random.random()*fmax
# 2nd step, check if it is inside the p.d.f. area and save x€[a,b] to Xi1, otherwise, reject it
if gamma(x,Pmu)>=y:
Xi1[i]=x
i+=1
#%%timeit
N=10000000
# 1st step of the try reject, choose a point inside the area [a,b]x[0,fmax]
# now, all N desired number are generated at once with numpy, thus, as some of them will be rejected, 2*N
# random numbers are generated
x=-1+2*np.random.random(2*N)
y=fmax*np.random.random(2*N)
# 2nd step, check if it is inside the p.d.f. area and save x€[a,b] to Xi1, otherwise, reject it
# in addition, as 2*N were generated in order to be safe and less than N x values will have been rejected
# a sample of size N must be chosen in order to compare with the other methods.
Xi2=x[y<=gamma(x,Pmu)][:N]
fig=plt.figure(2)
ax=fig.add_subplot(111)
ax.hist(Xi1,color='b',normed=1,bins=50,label='Montecarlo p.d.f.')
ax.hist(Xi2,color='g',normed=1,bins=50,label='Montecarlo p.d.f.',alpha=0.6)
ax.plot(cost,dG,'r--',linewidth=2,label='Theoretical')
ax.set_title('Try-reject Monte Carlo generated $d\Gamma$ p.d.f.')
ax.set_xlabel(r'$\cos\theta$')
ax.set_ylabel('$d\Gamma$')
ax.legend()
ax.set_ylim([0.4,0.6])
# Inverse function F^{-1}(r) implemented in Python
def Finv(r,Pmu):
# its arguments are:
# r : either int, float or np.array. Must be a value between 0 and 1
# Pmu : either int, float or np.array, its shape must be compatible with r in case of arrays
cost=(3.-6.*np.sqrt(.25-Pmu/3.*(r-.5-Pmu/12)))/Pmu
return cost
#%%timeit
N=10000000
Xinv1=np.empty(N)
for i in xrange(N):
r=random.random()
Xinv1[i]=Finv(r,Pmu)
# Montecarlo inverse function
def Montecarlo_inv_fun(Pmu,N=1000000):
# its arguments are:
# Pmu : np.array containing the value or values of Pmu for which the pdf will be generated
# Optional arguments:
# N : int, number of values in the pdf sample
m=len(Pmu)
#v.1
PmuV=np.array([Pmu]) #Transform Pmu into an 1xm matrix so that it matches the dimensions of r (Nxm)
r=np.random.random((N,m))
#v.2
#PmuV=np.empty((m,1))
#PmuV[:,0]=Pmu
#r=np.random.random((m,N))
Xinv = Finv(r,PmuV)
return Xinv
Xinv2=Montecarlo_inv_fun(np.array([Pmu]),10000000)[:,0]
# Now, in order to make the function compatible with arrays, Pmu must be reshaped into a np.array and
# afterwards, the obtained sample is converted from Nx1 matrix to vector of length N
#Plot for the inverse function method
fig=plt.figure(2)
ax=fig.add_subplot(111)
ax.hist(Xinv1,color='b',normed=1,bins=50,label='Montecarlo p.d.f.')
ax.hist(Xinv2,color='g',normed=1,bins=50,label='Montecarlo p.d.f.',alpha=0.6)
ax.plot(cost,dG,'r--',linewidth=2,label='Theoretical')
ax.set_title('Inverse function montecarlo generated $d\Gamma$ p.d.f.')
ax.set_xlabel(r'$\cos\theta$')
ax.set_ylabel('$d\Gamma$')
ax.legend()
ax.set_ylim([0.4,0.6])
mu=np.mean(Xinv2)
sigma=np.std(Xinv2) # equivalent to np.sqrt(np.var(Xi1))
skewness=stat.skew(Xinv2)
kurtosis=stat.kurtosis(Xinv2)
print 'The try-reject montecarlo generated distribution has:\n\tmean = %.6f,\n\t\
sigma = %.6f,\n\tskewness = %.6f\n\tand kurtosis = %.6f' %(mu, sigma, skewness, kurtosis)
N=int(1e7)
PmuVec=np.linspace(-1,1,30) # array of Pmu valus for which the pdf sample will be generated
X=Montecarlo_inv_fun(PmuVec,N) # call Montecarlo_inv_fun, which returns a matrix, containing N x values
# following the pdf of each Pmu in PmuVec
mu=np.mean(X,axis=0) # estimate the mean for each Pmu, thus, the result is a vector of the same length as PmuVec
muTh=-PmuVec/9. # calculate the theoretical mean for each Pmu
fig=plt.figure(1)
ax=fig.add_subplot(111)
ax.plot(PmuVec,mu,'ro',PmuVec,muTh,'b--')
ax.set_title('$P_{\mu}$ p.d.f. dependency')
ax.set_xlabel(r'$P_{\mu}$')
ax.set_ylabel('$\mu$')
ax.legend(['Estimated mean','Theoretical mean'])
sample_size = int(1e5)
Pmu_chosen_value = 0.5 #Choose a value of Pmu for this exercise
vec_length = 50 # Choose the length of the array "get_PmuVec"
get_PmuVec = np.ones(vec_length)*Pmu_chosen_value # Create vector with length = 50 filled with a given Pmu value from which the pdf sample will be generated
get_Pmus = Montecarlo_inv_fun(get_PmuVec, sample_size) # call Montecarlo_inv_fun, which returns a matrix with dimensions (sample_size X vec_length), containing x values
# following the pdf of each Pmu_chosen_value in get_PmuVec
estimated_mean = np.mean(get_Pmus, axis=0) #Get estimated mean
estimated_Pmu = -9*estimated_mean # estimate the mean for each Pmu. The result is a vector of the same length as PmuVec
variance1 = np.var(estimated_Pmu, ddof = 1) #Estimate the variance of the estimated Pmu. The "ddof" is used so that the division is not done as 1/N but as 1/N-1
variance2 = (sum((estimated_Pmu-Pmu_chosen_value)**2))/vec_length
print variance1
print variance2
Pmu_chosen_value = 0.5 #Choose a value of Pmu for this exercise
vec_length = 50 # Choose the length of the array "get_PmuVec"
get_PmuVec = np.ones(vec_length)*Pmu_chosen_value # Create vector with length = 50 filled with a given Pmu value from which the pdf sample will be generated
num_samples = 20
sample_sizes = np.logspace(start = 1, stop = 6.5, num = num_samples, dtype=int) #num refers to the nº of points generated between start and stop
estimated_Pmus = np.empty(num_samples)
for i,sample_size in enumerate(sample_sizes):
get_Pmus = Montecarlo_inv_fun(get_PmuVec, sample_size)
estimated_mean = np.mean(get_Pmus, axis = 0) #Get estimated mean
estimated_Pmus[i] = -9*np.mean(estimated_mean) # estimate the mean for each Pmu. The result is a vector of the same length as PmuVec
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogx(sample_sizes,estimated_Pmus,'ro',sample_sizes,Pmu_chosen_value*np.ones(num_samples),'b--')
ax.set_title('Proof of the Law of Large Numbers')
ax.set_xlabel('Sample size')
ax.set_ylabel('Estimated $P_{\mu}$')
ax.legend(['Estimated mean','Theoretical mean'],loc=3)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.loglog(sample_sizes, np.abs(estimated_Pmus-Pmu_chosen_value),'ro') #Move the two axes to logarithmic space
ax.set_title('Residuals')
ax.set_xlabel('Sample Size')
ax.set_ylabel('Estimated - Theoretical $P_{\mu}$')
#%%timeit
# Here we demonstrate the law of large numbers without reseting the sample
Pmu_41 = 0.5 #Choose a value of Pmu for this exercise
sample_size_41 = int(1e8) # Size of the sample, then, to estimate the LLN we will use the first m
# to calculate the mean, then the first 2m and so on
m = int(5e6) # Divide the Pmu means vector (of size vec_length_41) into m elements, thus, its legth will be
X_sample_41 = Montecarlo_inv_fun(np.array([Pmu_41]), sample_size_41)[:,0]
X_divided = np.array(np.split(X_sample_41,m))
partial_means = np.mean(X_divided,axis=1)
N_vec = np.arange(1,m+1)
cumulative_Pmus = -9*np.cumsum(partial_means)/N_vec
N_vec = sample_size_41/m*N_vec
#%%timeit
# After I did the above cell, I realized that taking into account how we have defined the MC,
# it can be done in a simpler way thanks to the fact that the random numbers we generate are independent
Pmu_41 = 0.5 #Choose a value of Pmu for this exercise
sample_size_41 = int(1e8) # Size of the sample, then, to estimate the LLN we will use the first m
# to calculate the mean, then the first 2m and so on
m = int(5e6) # Divide the Pmu means vector (of size vec_length_41) into m elements, thus, its legth will be
X_sample_41 = Montecarlo_inv_fun(np.ones(sample_size_41/m)*Pmu_41, m)
partial_means = np.mean(X_sample_41,axis=1)
N_vec = np.arange(1,m+1)
cumulative_Pmus = -9*np.cumsum(partial_means)/N_vec
N_vec = sample_size_41/m*N_vec
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogx(N_vec,cumulative_Pmus,'b-',alpha=0.7)
ax.semilogx(N_vec,np.ones(m)*Pmu_41,'k')
ax.set_title('Proof of the Law of Large Numbers')
ax.set_xlabel('Sample size')
ax.set_ylabel('Estimated $P_{\mu}$')
ax.legend(['Estimated mean','Theoretical mean'],loc='best')
N = 15 # number of samples for each montecarlo experiment
Pmu_chosen_value = 0.5 #Choose a value of Pmu for this exercise
mu_t=-Pmu_chosen_value/9.
num_repetitions = 15 # number of times the N x num_MC_experiments MC_samples matrix is generated
num_MC_experiments = 500000 # Choose the number of MC experiments to generate (it will be the # of t values in our histogram)
PmuVec_t = np.ones(num_MC_experiments)*Pmu_chosen_value # Create vector with length = num_MC_experiments filled with a given Pmu value from which the pdf sample will be generated
t_values = np.empty(num_repetitions*num_MC_experiments)
for repeat in xrange(num_repetitions):
MC_samples = Montecarlo_inv_fun(PmuVec_t, N) # matrix N x num_MC_experiments
x_bar = np.mean(MC_samples, axis=0) #Get estimated mean
s_square = np.var(MC_samples, axis=0, ddof=1) #Get estimated variance (sigma^2)
t_values[repeat*num_MC_experiments:(repeat+1)*num_MC_experiments] = np.sqrt(N/s_square)*(x_bar-mu_t)
fig = plt.figure()
ax = fig.add_subplot(111)
lim_prob = 0.001
grid_num = 100
x = np.linspace(stat.t.ppf(lim_prob, N), stat.t.ppf(1-lim_prob, N), grid_num)
ax.plot(x, stat.t.pdf(x, N), 'r-', label='t pdf (Theoretical)')
ax.hist(t_values,color='b',normed=1, bins = 20,label='Montecarlo p.d.f.')
ax.legend(loc=2)
mu_t = np.mean(t_values)
sigma_t = np.std(t_values) # equivalent to np.sqrt(np.var(Xi1))
skewness_t =stat.skew(t_values)
kurtosis_t =stat.kurtosis(t_values)
print '\tmean = %.6f,\n\t\
sigma = %.6f,\n\tskewness = %.6f\n\tand kurtosis = %.6f' %(mu_t, sigma_t, skewness_t, kurtosis_t)
N_clt = int(1e6)
Pmu_chosen_value = 0.5 #Choose a value of Pmu for this exercise
mu_t=-Pmu_chosen_value/9.
num_repetitions = 15 # number of times the N x num_MC_experiments MC_samples matrix is generated
num_MC_experiments = 200 # Choose the number of MC experiments to generate (it will be the # of t values in our histogram)
PmuVec_t = np.ones(num_MC_experiments)*Pmu_chosen_value # Create vector with length = num_MC_experiments filled with a given Pmu value from which the pdf sample will be generated
clt_values = np.empty(num_repetitions*num_MC_experiments)
for repeat in xrange(num_repetitions):
MC_samples = Montecarlo_inv_fun(PmuVec_t, N_clt) # matrix N x num_MC_experiments
x_bar = np.mean(MC_samples, axis=0) #Get estimated mean
s_square = np.var(MC_samples, axis=0, ddof=1) #Get estimated variance (sigma^2)
clt_values[repeat*num_MC_experiments:(repeat+1)*num_MC_experiments] = np.sqrt(N_clt/s_square)*(x_bar-mu_t)
# Expected gaussian pdf
mu = 0
sigma = 1
dist = stat.norm(mu, sigma)
x_pdf = np.linspace(-4, 4, 1000)
#Plot distributions
fig = plt.figure()
ax = fig.add_subplot(111)
lim_prob = 0.001
grid_num = 100
x = np.linspace(stat.t.ppf(lim_prob, N_clt), stat.t.ppf(1-lim_prob, N_clt), grid_num)
ax.plot(x, stat.t.pdf(x, N_clt), 'r-', lw=5, alpha=0.5, label='t pdf (Theoretical)')
ax.hist(clt_values,color='b',normed=1,bins=30,label='Montecarlo p.d.f.')
ax.plot(x_pdf, dist.pdf(x_pdf), '-k', label = 'Gaussian')
ax.legend()
mu_clt = np.mean(clt_values)
sigma_clt = np.std(clt_values)
skewness_clt =stat.skew(clt_values)
kurtosis_clt =stat.kurtosis(clt_values)
print '\tmean = %.6f,\n\t\
sigma = %.6f,\n\tskewness = %.6f\n\tand kurtosis = %.6f' %(mu_clt, sigma_clt, skewness_clt, kurtosis_clt)
Pmu = 0.3
num_sample_values = 1000
Xinv6 = Montecarlo_inv_fun(np.array([Pmu]),num_sample_values)[:,0] #Run Montecarlo with Pmu as a fixed parameter
fig = plt.figure()
ax = fig.add_subplot(111)
# Create the histogram (note: it's NOT normalized)
N_bin, bins, lis = ax.hist(Xinv6,color='b',bins=10,label='Montecarlo p.d.f.')
# N_bin is an array containing the values of the histogram bins
# bins is an array with the edges of the bins.
# The last parameter will not be used here.
ax.set_title('Inverse function montecarlo generated $d\Gamma$ p.d.f.')
ax.set_xlabel(r'$\cos\theta$')
ax.set_ylabel('$d\Gamma$')
ax.legend()
num_sample = 100
binomial_sample = 10000
Pmu_chosen_value = -0.3
bin_number = 4
PmuVec_61 = np.ones(binomial_sample)*Pmu_chosen_value # Create vector with length = num_MC_experiments filled with a given Pmu value from which the pdf sample will be generated
X_61 = Montecarlo_inv_fun(PmuVec_61,num_sample) #Run Montecarlo with Pmu as a fixed parameter
N_61 = np.empty(binomial_sample)
for i in xrange(binomial_sample):
hist, bin_edges = np.histogram(X_61[:,i],range=(-1,1)) # fixing the range to -1, 1 avoids the bins to start at the minimum
# of the sample which would be min(X_61) (around -0.98 but not -1)
N_61[i] = hist[bin_number-1]
bin_edges
def Fcumulative(cost,Pmu): # Cumulative function
F=.5*(cost+1)-Pmu/12.*(cost**2-1)
return F
ki=np.arange(num_sample)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(N_61,color='b',bins=10,normed=1,label='Montecarlo p.d.f.')
Prob_bin = Fcumulative(bin_edges[bin_number],Pmu_chosen_value)-Fcumulative(bin_edges[bin_number-1],Pmu_chosen_value)
ax.plot(ki,stat.binom.pmf(ki,num_sample,Prob_bin),'r--')
ax.set_title('Binomial distribution and Histogram for "bin_number" bin')
ax.set_xlabel('Number of entries in bin "bin_number"')
ax.set_ylabel('Number of succesful events (normalized)')
ax.legend()
num_sample = int(5e5)
binomial_sample = 1000
Pmu_chosen_value = -0.3
bin_number = 1
PmuVec_62 = np.ones(binomial_sample)*Pmu_chosen_value # Create vector with length = num_MC_experiments
#filled with a given Pmu value from which the pdf sample will be generated
X_62 = Montecarlo_inv_fun(PmuVec_62,num_sample) #Run Montecarlo with Pmu as a fixed parameter
N_62 = np.empty(binomial_sample)
for i in xrange(binomial_sample):
hist, bin_edges = np.histogram(X_62[:,i],range=(-1,1)) # fixing the range to -1, 1 avoids the bins to start at
#the minimum of the sample which would be min(X_62) (around -0.98 but not -1)
N_62[i] = hist[bin_number-1]
ki=np.arange(num_sample)
# Expected gaussian pdf
plot_center_value = Prob_bin*num_sample
sigma_gaussian = np.sqrt(Prob_bin*num_sample*(1-Prob_bin))
dist = stat.norm(0, 1)
# Shift and scale the histogram with respect to the estimated values. NOTE: Since we cannot generate infinite samples,
# we'll use np.mean and np.std instead of using the real values given by the theoretical distribution. This is likely
# to have an impact when shifting.
ki = (ki-np.mean(N_62))/np.std(N_62)
N_62 = (N_62-np.mean(N_62))/np.std(N_62)
x_pdf= np.linspace(-4,4,1000)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(N_62,color='b',bins=20,normed=1,label='Montecarlo p.d.f.')
Prob_bin = Fcumulative(bin_edges[bin_number],Pmu_chosen_value)-Fcumulative(bin_edges[bin_number-1],Pmu_chosen_value)
ax.plot(ki,stat.binom.pmf(ki,num_sample,Prob_bin),'r--')
ax.plot(x_pdf, dist.pdf(x_pdf), '-k', label = 'Gaussian')
ax.set_title('Binomial distribution tending to a Gaussian and Histogram for "bin_number" bin')
ax.set_xlabel('Number of entries in bin "bin_number"')
ax.set_ylabel('Number of succesful events (normalized)')
ax.set_xlim([-4,4])
ax.legend()
mu_62 = np.mean(N_62)
sigma_62 = np.std(N_62)
skewness_62 =stat.skew(N_62)
kurtosis_62 =stat.kurtosis(N_62)
print '\tmean = %.6f,\n\t\
sigma = %.6f,\n\tskewness = %.6f\n\tand kurtosis = %.6f' %(mu_62,
sigma_62, skewness_62, kurtosis_62)
num_sample_63 = 10000
chisquared_sample = 1000 # Number of times we will call the MC function -- the higher this value, the better will the
#theorical Chi squared distribution describe the "observed" one
Pmu_chosen_value = -0.3
bin_number_63 = 5
PmuVec_63 = np.ones(chisquared_sample)*Pmu_chosen_value # Create vector with length = num_MC_experiments filled with a given Pmu value from which the pdf sample will be generated
X_63 = Montecarlo_inv_fun(PmuVec_63, num_sample_63) #Run Montecarlo with Pmu as a fixed parameter
mean_63 = np.empty(bin_number_63)
bin_edges_63 = np.linspace(-1,1,bin_number_63+1) # Note that the dimensions of this parameter are 1x(bin_number+1), since we have
# to take into account the left-most and right-most bin
Prob_bin_63 = Fcumulative(bin_edges_63[1:], Pmu_chosen_value)-Fcumulative(bin_edges_63[:-1],Pmu_chosen_value)
mean_63 = Prob_bin_63*num_sample_63 #This will be our mu for each bin.
chi_squared_values = np.empty(chisquared_sample)
for i in xrange(chisquared_sample):
hist, bin_edges = np.histogram(X_63[:,i],range=(-1,1), bins = bin_number_63) # fixing the range to -1, 1 avoids the bins to start at the minimum
# of the sample which would be min(X_63) (around -0.98 but not -1)
chi_squared_values[i] = np.sum((hist-mean_63)**2/(mean_63*(1-Prob_bin_63)))
x_pdf_chisquared = np.linspace(min(chi_squared_values),max(chi_squared_values),1000)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(chi_squared_values,color='b', bins=15,normed=1,label = '$\chi^2$ values')
ax.plot(x_pdf_chisquared,stat.chi2.pdf(x_pdf_chisquared,bin_number_63),'r--',lw=2, label = '$\chi^2$ distribution (theoretical)')
ax.set_title('Expected and Observed Distribution')
ax.set_xlabel('Chi Squared values')
ax.legend()
'''STEP 1'''
pmu_7 = random.uniform(-1,1)
sample_size_n = int(1e3)
sample_7 = Montecarlo_inv_fun(np.array([pmu_7]), sample_size_n)[:,0]
bin_number_7 = 10
'''STEP 2'''
#Obtain the theoretical pdf
dG = gamma(cost,pmu_7)
#Plot the histogram and the theoretical distribution
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sample_7,color='b', bins = bin_number_7,normed=1, range=(-1,1), label = 'Sample values')
#ax.plot(cost, dG, label = 'Theoretical pdf')
ax.set_title('Histogram data')
''' STEP 3'''
hist_7, bin_edges_7 = np.histogram(sample_7,range=(-1,1), bins = bin_number_7)
#Define the likelihood function
def likelihood(pmu_try, hist_values):
n_bin = len(hist_values)
N = sum(hist_values)
bin_edges = np.linspace(-1, 1, n_bin+1)
Prob_bin = Fcumulative(bin_edges[1:], pmu_try) - Fcumulative(bin_edges[:-1], pmu_try)
# This is the theoretical likelihood as defined in the comment above. Since the values that we obtain are very
# small, we have decided to work with the logarithm of this function.
'''
nfact = factorial(len(hist_values))
num = np.power(Prob_bin_7,hist_values)
den = factorial(hist_values)
likelihood = nfact*np.prod(num/den)
'''
#term1 = -np.log(factorial(N)) # note that the log likelihood is defined as -log(L)
term2 = -np.sum(hist_values*np.log(Prob_bin))
#term3 = -np.sum(np.log(factorial(hist_values)))
#likelihood = term1 + term2 - term3
return term2
# define the derivative of the likelihood function
def d_likelihood(pmu_try, hist_values):
n_bin = len(hist_values)
N = sum(hist_values)
bin_edges = np.linspace(-1, 1, n_bin+1)
Prob_bin = Fcumulative(bin_edges[1:], pmu_try) - Fcumulative(bin_edges[:-1], pmu_try)
d_like = np.sum(hist_values/Prob_bin*(np.square(bin_edges[:-1])-np.square(bin_edges[1:])))/12.
return d_like
pmu_grid = np.linspace(-1,1,50)
like = np.empty(len(pmu_grid))
Dlike = np.empty(len(pmu_grid))
for i,pmu in enumerate(pmu_grid):
like[i] = likelihood(pmu, hist_7)
Dlike[i] = d_likelihood(pmu, hist_7)
fig=plt.figure()
ax1=fig.add_subplot(121)
ax1.plot(pmu_grid, like, '.')
ax2=fig.add_subplot(122)
ax2.plot(pmu_grid, Dlike, '.')
#res = opt.minimize(fun=likelihood, x0=0, args=(hist_7))
#Pmu_ML = res.x[0]
Pmu_ML = opt.fsolve(d_likelihood, 0, args=(hist_7))
print 'Real value of Pmu:%.4g' %pmu_7
L_ML = likelihood(Pmu_ML,hist_7)
fun_var = lambda Pmu : likelihood(Pmu,hist_7)-L_ML-0.5
err_plus = opt.fsolve(fun_var,Pmu_ML+1)[0]-Pmu_ML
err_minus = Pmu_ML-opt.fsolve(fun_var,Pmu_ML-1)[0]
print 'ML estimate of Pmu:\n\tPmu=%.4g (-%.3g,+%.3g)' %(Pmu_ML,err_minus,err_plus)
print 'Mean estimate of Pmu:\n\tPmu=%.4g (%.3g)' %(-9*np.mean(sample_7), 9*np.std(sample_7)/np.sqrt(sample_size_n)) # here I think that
# the std of the estimator based on the mean is the std of Pmu=a*x, where a is the constant -9 and x is the random variable mean,
# therefore, the std of Pmu is abs(a)*std(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Math with Python
Step2: Logical Indexing
Step3: numpy arrays as a matrix
Step4: $ 3 x_0 + x_1 = 9 $ and $ x_0 + 2 x_1 = 8 $
Step5: Symbolic Math with Sympy
Step6: Now we can initialize the console to show nicely formatted mathematical expressions. If we don't do this, the output will be plain ASCII characters
Step7: I really enjoy using sympy because I can type my equations in, evaluate them, and it displays them very nicely, which is easier to read than lines of text. Simple expressions are easy to make
Step8: Since this is a Windows machine, the latex command printed two backslashes; if we change that to single backslashes we get the LaTeX version of the summation
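For this summation the single-backslash string should look something like \sum_{n=1}^{10} \frac{1}{n^{2}}, which renders as $\sum_{n=1}^{10} \frac{1}{n^{2}}$.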
Step9: Mixed symbolic and numerical linear algebra
Step10: Python's list is a generic data storage object. It can easily be converted to a numpy array, which is specialized for numerical and scientific computation
Step11: Symbolic mathematics with sympy
Step12: Signal Processing
Step13: Differential Equations
Step14: Differential Equations Quiver plot
|
<ASSISTANT_TASK:>
Python Code:
# setup
import numpy as np
import sympy as sp
import scipy
from scipy import linalg
from pprint import pprint
sp.init_printing(use_latex='mathjax')
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (12, 8) # (width, height)
plt.rcParams['font.size'] = 14
plt.rcParams['legend.fontsize'] = 16
from matplotlib import patches
#get_ipython().magic('matplotlib') # seperate window
get_ipython().magic('matplotlib inline') # inline plotting
# using the range function to create a numpy array
x = np.array(range(10))
print('x = ',x, 'as a',x.dtype)
# using numpy function arange
x = np.arange(10)
print('x = ',x, 'as a',x.dtype)
x = np.linspace(0,9,10)
print('x = ',x, 'as a',x.dtype)
type(x)
# multiple array operations
x*3
# exponent array operations
x**2
# trig array operations
np.sin(x)
x.sum()
print([k for k in dir(x) if k[:2] != '__' ])
# logical indexing
3 < x
# negate logical indexing
~(3 < x)
# logical indexing and
(3 < x) & (x < 5)
# logical index or
(3 < x) | (x < 5)
# logical indexing and
(3 < x) & (x < 5)
z = (3 < x) & (x < 5)
z
# if 0,1 is preferred, change the datatype
z.astype(int)
# create a 1x3 array
c = np.array([1,2,3])
print('c =',c,' of shape',c.shape)
#does not convert to 3x1 since it is only a 1d array
# this is not what I would expect, so I will call it a nuance of arrays in python
c.transpose()
# we must reshape it
c = c.reshape((1,3))
print('c =',c,' of shape',c.shape)
c.transpose()
# or when we defined it, add brackets in it to create a true 1x3 array
c = np.array([[1,2,3]])
print('c =',c,' of shape',c.shape)
# now we can transpose it.
c.transpose()
print('c =',c,' of shape',c.shape)
c.shape
# let's create some matrices (but recall, use the array type)
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 4], [5, 7]])
a
b
a * b
# to perform a matrix multiplication on an array, use
a @ b
# or
np.dot(a,b)
# to solve ax=b, use
linalg.solve(a,b)
# or slower and less accurate
linalg.inv(a) @ b
# or
linalg.inv(a).dot(b)
# 3 * x0 + x1 = 9 and x0 + 2 * x1 = 8
a = np.array([[3,1], [1,2]])
b = np.array([[9],[8]])
x = np.linalg.solve(a, b)
x
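# Editor's sketch: a quick residual check that the computed solution satisfies a @ x = b.
np.allclose(a @ x, b)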
# let's create a 2x5 zeros matrix
c = np.zeros((2,5))
c
# With numpy, we can create arrays of numbers as well as functions that accept arrays
f = lambda x,y: np.sqrt(x**2+y**2)
f(3,4)
a = 3.1
b = np.arange(15,35, 0.75)
f(a,b)
# We can also perform numerical differentiation and integration
x = np.arange(-np.pi,np.pi, 0.01)
y = np.sin(x)*np.exp(-x)
plt.plot(x,y);
plt.xlabel('x')
plt.ylabel('$f(x)$')
plt.title('numerical integration example')
plt.show()
# Using the trapezoidal rule, we can numerically integrate the function
np.trapz(y,x)
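# Editor's sketch: the comment above also mentions numerical differentiation;
# np.gradient gives a central-difference derivative of y on the same x grid.
dydx = np.gradient(y, x)
plt.plot(x, dydx)
plt.xlabel('x')
plt.ylabel("$f'(x)$")
plt.title('numerical differentiation example')
plt.show()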
%reset
import sympy as sp
import numpy as np
sp.init_printing()
x,y = sp.symbols('x y')
y = (sp.pi + x)**2
y
y.subs(x,1.3)
# or to get a number
sp.N(y.subs(x,1.3))
# calculus is also easy
sp.diff(y)
f = sp.Function('f')(x)
f = sp.sin(x)*sp.exp(-x)
f
F = sp.integrate(f)
F
F1 = sp.integrate(f,(x,-sp.pi,sp.pi ))
# as shown numerically, we can symbolically evaluate the function
# and get the same result
F1
float(F1)
sp.plot(f,(x,-sp.pi,sp.pi));
sp.plot(F,(x,-sp.pi,sp.pi));
# Other various math tasks can be much easier when using sympy
n = sp.symbols('n')
sum_fcn = sp.Sum(1/n**2, (n,1,10))
sum_fcn
# to evaluate the summation simply add the evalf method
sum_fcn.evalf()
sp.latex(sum_fcn)
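# Editor's note (sketch): the doubled backslashes above are only the string repr;
# printing the same string shows the plain single-backslash LaTeX source.
print(sp.latex(sum_fcn))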
from IPython.display import Latex
Latex('$sum\_fcn=' + sp.latex(sum_fcn) + '$' )
# linear algebra
s11, n22, e33 = sp.symbols("sigma11 nu22 epsilon33")
A = sp.Matrix([[s11],[n22],[e33]])
A
m11, m12, m21, m22 = sp.symbols("m11, m12, m21, m22")
b1, b2 = sp.symbols("b1, b2")
A = sp.Matrix([[m11, m12],[m21, m22]])
A
b = sp.Matrix([[b1], [b2]])
b
A**2
A.det()
A.inv()
x = sp.Symbol('x') # x = var('x')
M = sp.Matrix([[2,x],[x,3]])
M
M.eigenvals()
M.eigenvects()
M.eigenvects()[1][0]
Mval = M.eigenvects()[1][0]
Mval.evalf(subs={x:3.14})
print(sp.latex(M))
from IPython.display import Latex
Latex('$M=' + sp.latex(M) + '$' )
np.zeros((5,3))
np.array([[1,2],[3,4]])
np.array([[1,2],[3,4]])
# Matrix multiplication can be achieved using the dot method
i = [[1,0,0],[0,1,0],[0,0,1]] # identiy matrix
a = [[4,3,1],[5,7,2],[2,2,2]]
np.dot(i,a)
#Or, matrix multiplication can be done if a matrix is explicitly defined
np.array(i) @ np.array(a)
# Notice: when arrays are multiplied with *, we get the element-wise product, not the dot product
m = np.array(i) * np.array(a)
m
m.T # transpose
m**2
np.array(a)**2
m
m[:,2]
m[2,:]
m[:2,:2]
m[1:,1:]
# import sympy library and initialize latex printing
import sympy as sp
#sp.init_printing()
#sp.init_printing(use_latex='matplotlib')
sp.init_printing(use_latex='mathjax')
# add a symbolic character
x = sp.Symbol('x')
sp.sqrt(x**2)
r = sp.Rational(11, 13)
r
float(r)
f = sp.Function('f')
f
f(x)
h = sp.Lambda(x,x**2)
h
w = 2*(x**2-x)-x*(x+1)
w
w.args
sp.simplify(w)
sp.factor(x**2-1)
#partial fractions
y = 1/(x**2+3*x+2)
y
sp.apart(y,x)
f = sp.Function('f')(x)
sp.diff(f,x)
y = sp.Symbol('y')
g = sp.Function('g')(x,y)
g.diff(x,y)
a,b,c,d = sp.symbols("a b c d")
M = sp.Matrix([[a,b],[c,d]])
M
M*M
# if ipython is to be used as a calculator initialize with
from sympy import init_session
init_session()
from sympy import oo, Function, dsolve, Eq, Derivative, sin,cos,symbols
from sympy.abc import x
import sympy as sp
import numpy as np
import matplotlib.pyplot as mp
get_ipython().magic('matplotlib inline')
# this will print output as unicode
# assign a sympy variable
x = sp.var('x')
x
#assign a function
f = sp.sin(6*x)*sp.exp(-x)
f
f.subs(x,3)
float(f.subs(x,3))
sp.plot(f,(x,-5,5))
# a onetime pretty print
sp.pprint(f)
#or we can print the latex rendering
sp.latex(f)
# first derivative
df = f.diff()
df
# differentiate f once with respect to x
sp.diff(f,x,1)
# substitute x with pi
f.subs(x,np.pi)
#%% Numeric Computation from the documentation
from sympy.abc import x
# lambdify using the math module, 10^2 faster than subs
expr = sp.sin(x)/x
f = sp.lambdify(x,expr)
f(3.14)
# lambdify using numpy
f = sp.lambdify(x,expr, "numpy")
f(np.linspace(1,3.14,20))
## Signal Processing
#Page 174 Introduction for python for Science - David Pine
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline') # inline plotting
width = 2.0
freq = 0.5
t = np.linspace(-10, 10, 101) # linearly space time array
g = np.exp(-np.abs(t)/width)*np.sin(2.0 * np.pi * freq * t)
dt = t[1]-t[0] # increment between times in time array
G = fftpack.fft(g) # FFT of g
f = fftpack.fftfreq(g.size, d=dt) # frequenies f[i] of g[i]
f = fftpack.fftshift(f) # shift frequencies from min to max
G = fftpack.fftshift(G) # shift G order to coorespond to f
fig = plt.figure(1, figsize=(8,6), frameon=False)
ax1 = fig.add_subplot(211)
ax1.plot(t, g)
ax1.set_xlabel('t')
ax1.set_ylabel('g(t)')
ax2 = fig.add_subplot(212)
ax2.plot(f, np.real(G), color='dodgerblue', label='real part')
ax2.plot(f, np.imag(G), color='coral', label='imaginary part')
ax2.legend()
ax2.set_xlabel('f')
ax2.set_ylabel('G(f)')
plt.show()
from pylab import *
from scipy import fft
get_ipython().magic('matplotlib inline') # inline plotting
N = 2**9
F = 25
t = arange(N)/float(N)
x = cos(2*pi*t*F) + rand(len(t))*3
subplot(2,1,1)
plot(t,x)
ylabel('x []')
xlabel('t [seconds]')
title('A cosine wave')
grid()
subplot(2,1,2)
f = t*N
xf = fft(x)
plot(f,abs(xf))
title('Fourier transform of a cosine wave')
xlabel('xf []')
ylabel('xf []')
xlim([0,N])
grid()
show()
# note the spike at 25 hz and 512-25
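# Editor's note (sketch): the mirror peak near N - 25 is the negative-frequency image
# that any real-valued signal produces in a full FFT; numpy.fft.rfft returns only the
# non-redundant half-spectrum if that duplication is unwanted.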
# see here for example scripts
# C:\Users\Neal\Anaconda3\Lib\site-packages\sympy\mpmath\tests
from sympy import Function, dsolve, Eq, Derivative, sin,cos,symbols
from sympy.abc import x
import numpy as np
import sympy as sp
import matplotlib.pyplot as mp
import matplotlib.pyplot as plt
f = Function('f')
deq = dsolve(Derivative(f(x), x,x) + 9*f(x), f(x))
deq
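# Editor's sketch: sympy can verify the general solution symbolically; checkodesol
# should return (True, 0) if deq solves f'' + 9 f = 0.
sp.checkodesol(Derivative(f(x), x, x) + 9*f(x), deq)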
diffeq1_sym = deq.args[1]
diffeq1_sym
diffeq1 = diffeq1_sym.subs({'C1':1, 'C2':0.5})
diffeq1
diffeq1_f = sp.lambdify(x,diffeq1, "numpy")
diffeq1_f
diffeq1arr = diffeq1_f(np.linspace(1,3.14,20))
diffeq1arr
# numpy plot
plt.plot(diffeq1_f(np.linspace(-10,10,2000)));
plt.title('plot of the numpy array');
# sympy plot
sp.plot(diffeq1, title='plot of the sympy function');
# quiver plot
f=lambda x: [x[0]**2 - 2*x[0] - x[1] + 0.5, x[0]**2 + 4*x[1]**2 - 4]
x,y=np.mgrid[-0.5:2.5:24j,-0.5:2.5:24j]
U,V=f([x,y])
plt.quiver(x,y,U,V,color='r', \
linewidths=(0.2,), edgecolors=('k'), \
headaxislength=5)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: downloading nltk gutenberg corpus, if not downloaded already
Step2: files in test data
Step3: preparing input
Step4: the library takes as input a nested list whose elements are the original documents as nested lists. the nested lists represent the document's structure (sections, paragraphs, sentences). this can be an in-memory nested list or some generator that yields a nested list when it is iterated over.
Step5: excerpt
Step6: processing using corpushash
Step7: instatiating CorpusHash class, which hashes the provided corpus to the corpus_path
Step8: that is it. corpushash's work is done.
Step9: loading libraries we need for processing
Step10: defining iterable for gensim
Step11: as the analyst will not have access to this convenience function, we will build a new generator for this.
Step12: building gensim dictionary
Step13: bag-of-words
Step14: we must re-instantiate the generator, else it'll be depleted.
Step15: tf-idf model
Step16: after calculating the frequencies, it's time to transform the given bag-of-words vectors to corresponding tf-idf weight vectors.
Step17: example of token ids and their tf-idf weights
Step18: validating tf-idf model on unencoded corpus
Step19: obtaining decode dictionary
Step20: here we are reinstantiating the generators, applying the tf-idf model, and then iterating over the model's results to replace the hashed tokens with their decoded counterparts. the result is saved to disk. make sure your indexer doesn't change document order (gutenberg.fileids does it), else the names may mismatch with the contents.
Step21: 2. tf-idf on decoded corpus
Step22: creating the dictionary that maps a token to an ID
Step23: creating a generator that yields the bag-of-words model of a document when iterated over
Step24: tf-idf model
Step25: applying the model to the documents (not forgetting to reinstantiate the generators)
Step26: comparing results of encoded and decoded corpus
|
<ASSISTANT_TASK:>
Python Code:
import os
import string
import random
import pickle
import nltk
from nltk.corpus import gutenberg
#nltk.download("gutenberg")
gutenberg.fileids()
corpus_path = os.path.join(os.getcwd(), 'guten_test')
corpus_path
%%time
decoded_gutencorpus = []
for document_name in gutenberg.fileids():
document = [word.lower() for word in gutenberg.words(document_name) if word not in string.punctuation and not word.isdigit()]
decoded_gutencorpus.append(document)
document = random.choice(decoded_gutencorpus)
print(document[:100])
import corpushash as ch
%time hashed_guten = ch.CorpusHash(decoded_gutencorpus, corpus_path)
encoded_corpus_path = hashed_guten.public_path
encoded_corpus_path
os.path.exists(encoded_corpus_path)
import json
import gensim
for i in hashed_guten.read_hashed_corpus():
print(i[0], i[1][:1])
def encoded_gutenberg_yielder(corpus_path):
for ix in range(len(gutenberg.fileids())):
path = os.path.join(corpus_path, '{}.json'.format(ix))
with open(path, 'r') as fp:
document_tokens = json.load(fp)
yield document_tokens
example_doc = encoded_gutenberg_yielder(encoded_corpus_path)
print("\n".join(next(example_doc)[:10]))
encoded_gutencorpus = encoded_gutenberg_yielder(encoded_corpus_path)
encoded_gutendict = gensim.corpora.Dictionary(encoded_gutencorpus)
#encoded_gutendict.save_as_text('enc_dict.txt', sort_by_word=False)
print("the number of unique words in our corpus is {}.".format(len(encoded_gutendict)))
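# Editor's sketch (optional, illustrative thresholds only): very rare or very common
# hashed tokens could be pruned with gensim's Dictionary.filter_extremes before building
# the bag-of-words model. Left commented out so the encoded and decoded pipelines below
# stay identical.
# encoded_gutendict.filter_extremes(no_below=5, no_above=0.5)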
def bow_gutenberg_yielder(corpus, dictionary):
for document_tokens in corpus:
yield dictionary.doc2bow(document_tokens)
encoded_gutencorpus = encoded_gutenberg_yielder(encoded_corpus_path)
encoded_gutenbow = bow_gutenberg_yielder(encoded_gutencorpus, encoded_gutendict)
print('token', '>>', "(token id, frequency in document)\n")
for i in next(encoded_gutenbow)[:10]:
print(encoded_gutendict.get(i[0]), '>>', i)
encoded_gutencorpus = encoded_gutenberg_yielder(encoded_corpus_path)
encoded_gutenbow = bow_gutenberg_yielder(encoded_gutencorpus, encoded_gutendict)
encoded_tfidf = gensim.models.TfidfModel(encoded_gutenbow)
encoded_gutencorpus = encoded_gutenberg_yielder(encoded_corpus_path)
encoded_gutenbow = bow_gutenberg_yielder(encoded_gutencorpus, encoded_gutendict)
encoded_guten_tfidf = encoded_tfidf[encoded_gutenbow]
print('token', '>>', "(token id, tf-idf weight)\n")
for i in encoded_guten_tfidf:
for k in i[:10]:
print(encoded_gutendict.get(k[0]), '>>', k)
break
decode_dictionary_path = hashed_guten.decode_dictionary_path
os.path.isfile(decode_dictionary_path)
with open(decode_dictionary_path, 'rb') as f:
decode_dictionary = pickle.load(f)
encoded_gutencorpus = encoded_gutenberg_yielder(encoded_corpus_path)
encoded_gutenbow = bow_gutenberg_yielder(encoded_gutencorpus, encoded_gutendict)
encoded_guten_tfidf = encoded_tfidf[encoded_gutenbow]
for path, document in zip(gutenberg.fileids(), encoded_guten_tfidf):
decoded_document = []
for tuple_value in document:
hashed_token = encoded_gutendict.get(tuple_value[0]) # 6 - > ed07dbbe94c8ff385a1a00e6720f0ab66ac420...
token, _ = decode_dictionary[hashed_token] # 'ed07dbbe94c8ff385a1a00e... -> 'genesis'
decoded_document.append("{}: {}".format(token, tuple_value[1]))
fname = 'decoded_'+ path
with open(os.path.join(corpus_path, fname), 'w') as f:
f.write("\n".join(decoded_document))
example_id = random.choice(gutenberg.fileids())
with open(os.path.join(corpus_path, 'decoded_' + example_id), 'r') as f:
decoded_doc = f.read().splitlines()
print(example_id, '>>')
print("\n".join(decoded_doc[:10]))
example_document = random.choice(decoded_gutencorpus)
print(example_document[:10])
decoded_gutendict = gensim.corpora.Dictionary(decoded_gutencorpus)
#decoded_gutendict.save_as_text('dec_dict.txt', sort_by_word=False)
len(decoded_gutendict)
decoded_gutenbow = bow_gutenberg_yielder(decoded_gutencorpus, decoded_gutendict)
print('token', '>>', "(token id, frequency in document)\n")
for i in next(decoded_gutenbow)[:10]:
print(decoded_gutendict.get(i[0]), '>>', i)
decoded_gutenbow = bow_gutenberg_yielder(decoded_gutencorpus, decoded_gutendict)
decoded_tfidf = gensim.models.TfidfModel(decoded_gutenbow)
decoded_gutenbow = bow_gutenberg_yielder(decoded_gutencorpus, decoded_gutendict)
decoded_guten_tfidf = decoded_tfidf[decoded_gutenbow]
print('token', '>>', "(token id, tf-idf weight)\n")
for i in decoded_guten_tfidf:
for k in i[:10]:
print(decoded_gutendict.get(k[0]), '>>', k)
break
encoded_gutencorpus = encoded_gutenberg_yielder(encoded_corpus_path)
encoded_gutenbow = bow_gutenberg_yielder(encoded_gutencorpus, encoded_gutendict)
encoded_guten_tfidf = encoded_tfidf[encoded_gutenbow]
decoded_gutenbow = bow_gutenberg_yielder(decoded_gutencorpus, decoded_gutendict)
decoded_guten_tfidf = decoded_tfidf[decoded_gutenbow]
%%time
encoded_tfidf, decoded_tfidf = {}, {}
for encoded_document, decoded_document in zip(encoded_guten_tfidf, decoded_guten_tfidf):
for encoded_item, decoded_item in zip(encoded_document, decoded_document):
hashed_token = encoded_gutendict.get(encoded_item[0])
original_token = decode_dictionary[hashed_token][0] # get hash, ignoring salt
encoded_tfidf[original_token] = round(encoded_item[1], 7) # rounding because python <3.6 seems to represent floats inconsistently
decoded_tfidf[decoded_gutendict.get(decoded_item[0])] = round(decoded_item[1], 7)
print(encoded_tfidf == decoded_tfidf)
random_token = random.choice(list(encoded_tfidf.keys()))
print("example token: tf-idf weight in encoded corpus | in decoded corpus\n{:^35}: {} | {}".format(random_token, encoded_tfidf[random_token], decoded_tfidf[random_token]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This dataset is a debug dump from a Lustre filesystem. Typically these events occur due to code bugs (LBUG), heavy load, hardware problems, or misbehaving user application IO.
Step2: Let's take a look at the first five lines of the debug log. This log is colon-delimited, and roughly corresponds to the following information
Step3: Now let's split each line of the RDD into lowercase "words".
Step4: Notice that this map returns immediately; no actions have been taken- the DAG has been updated to prepare for transformations. I like to think of this as analogous to a page fault, but applying to a Directed Acyclic Graph.
Step5: Part 2
Step6: Did it work?
Step7: Now we issue an action to the RDD
Step8: And as a percent of the overall file?
Step9: Now let's determine the effect of the flatMap
Step10: Now keep only "words" longer than 2 characters, filtering out the short ones.
Step11: To sort words by number of occurences we map each word of each line to a tuple
Step12: We utilize reduceByKey
Step13: We swap the order of the tuple's contents to sort by the number rather than words. The argument "False" passed to sortByKey instructs it to sort descending.
|
<ASSISTANT_TASK:>
Python Code:
from pyspark import SparkConf, SparkContext
import re
sc
partitions = 18
parlog = sc.textFile("/lustre/janus_scratch/dami9546/lustre_debug.out", partitions)
parlog.take(5)
words = parlog.map(lambda line: re.split('\W+', line.lower().strip()))
words.take(2)
mfds = words.filter(lambda x: 'mfd' in x and 'change' in x)
mfds.take(2)
mfds.count()
'{0:0.2f}%'.format((mfds.count()/float(parlog.count()))*100)
flatwords = parlog.flatMap(lambda line: re.split('\W+', line.lower().strip()))
longwords = flatwords.filter(lambda x: len(x) > 2 )
longwords.take(10)
longwords = longwords.map(lambda word: (word, 1))
longcount = longwords.reduceByKey(lambda a, b: a + b)
longcount.take(10)
longwords = longcount.map(lambda x: (x[1], x[0])).sortByKey(False)
longwords.take(20)
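# Editor's sketch: an equivalent one-liner on the same longcount RDD -- takeOrdered
# avoids the explicit swap-and-sort by ordering on a negated count key.
longcount.takeOrdered(20, key=lambda x: -x[1])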
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CrossCorrelation Example
Step2: Now let's turn noisy_1 and noisy_2 into Lightcurve objects. This way we have two Lightcurves to calculate CrossCorrelation.
Step3: 2. Create a CrossCorrelation Object from two Light curves created above
Step4: Now, Cross Correlation values are stored in attribute corr, which is called below.
Step5: 3. Plot Cross Correlation for Different lags
Step6: Given the phase offset of pi/2 between the two lightcurves created above, and freq = 1 Hz, time_shift should be close to 0.25 sec. The small error is due to the finite time resolution.
Step7: Modes of Correlation
Step8: The number of data points in corr and in the largest lightcurve are the same in this mode.
Step9: Creating CrossCorrelation with full mode now using same data as above.
Step10: Full mode does a full cross-correlation.
Step11: Another Example
Step12: Yet another example with a longer Lightcurve
Step13: Converting noisy signals into Lightcurves.
Step14: Now, creating CrossCorrelation Object by passing lc1 and lc2 into the constructor.
Step15: time_shift is very close to 0.25 sec, in this case.
Step16: To create an AutoCorrelation object, simply pass a Lightcurve into the AutoCorrelation constructor. Using the same Lightcurve created above to demonstrate AutoCorrelation.
Step17: time_shift for AutoCorrelation is always zero, since a signal is maximally correlated with itself at zero lag.
Step18: Another Example
Step19: AutoCorrelation also supports {full,same,valid} modes similar to CrossCorrelation
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from stingray import Lightcurve
from stingray.crosscorrelation import CrossCorrelation
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
%matplotlib inline
font_prop = font_manager.FontProperties(size=16)
dt = 0.03125 # seconds
exposure = 10. # seconds
freq = 1 # Hz
times = np.arange(0, exposure, dt) # seconds
signal_1 = 300 * np.sin(2.*np.pi*freq*times) + 1000 # counts/s
signal_2 = 300 * np.sin(2.*np.pi*freq*times + np.pi/2) + 1000 # counts/s
noisy_1 = np.random.poisson(signal_1*dt) # counts
noisy_2 = np.random.poisson(signal_2*dt) # counts
lc1 = Lightcurve(times, noisy_1)
lc2 = Lightcurve(times, noisy_2)
len(lc1)
fig, ax = plt.subplots(1,1,figsize=(10,6))
ax.plot(lc1.time, lc1.counts, lw=2, color='blue')
ax.plot(lc1.time, lc2.counts, lw=2, color='red')
ax.set_xlabel("Time (s)", fontproperties=font_prop)
ax.set_ylabel("Counts (cts)", fontproperties=font_prop)
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(which='major', width=1.5, length=7)
ax.tick_params(which='minor', width=1.5, length=4)
plt.show()
cr = CrossCorrelation(lc1, lc2)
cr.corr[:10]
# Time Resolution for Cross Correlation is same as that of each of the Lightcurves
cr.dt
cr.plot(labels = ['Time Lags (seconds)','Correlation'])
cr.time_shift #seconds
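# Editor's note (sketch): with a pi/2 phase offset at freq = 1 Hz the expected lag is
# (pi/2) / (2*pi*freq) = 0.25 s, so the recovered time_shift should agree to within
# the light-curve resolution dt = 0.03125 s.
expected_shift = (np.pi/2) / (2*np.pi*freq)
expected_shift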
cr.mode
cr.n
cr1 = CrossCorrelation(lc1, lc2, mode = 'full')
cr1.plot()
cr1.mode
cr1.n
cs = CrossCorrelation()
cs.corr = np.array([ 660, 1790, 3026, 4019, 5164, 6647, 8105, 7023, 6012, 5162])
time_shift, time_lags, n = cs.cal_timeshift(dt=0.5)
time_shift
cs.plot( ['Time Lags (seconds)','Correlation'])
dt = 0.0001 # seconds
exposure = 50. # seconds
freq = 1 # Hz
times = np.arange(0, exposure, dt) # seconds
signal_1 = 300 * np.sin(2.*np.pi*freq*times) + 1000 * dt # counts/s
signal_2 = 200 * np.sin(2.*np.pi*freq*times + np.pi/2) + 900 * dt # counts/s
lc1 = Lightcurve(times, signal_1)
lc2 = Lightcurve(times, signal_2)
len(lc1)
fig, ax = plt.subplots(1,1,figsize=(10,6))
ax.plot(lc1.time, lc1.counts, lw=2, color='blue')
ax.plot(lc1.time, lc2.counts, lw=2, color='red')
ax.set_xlabel("Time (s)", fontproperties=font_prop)
ax.set_ylabel("Counts (cts)", fontproperties=font_prop)
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(which='major', width=1.5, length=7)
ax.tick_params(which='minor', width=1.5, length=4)
plt.show()
cs = CrossCorrelation(lc1, lc2)
print('Done')
cs.corr[:50]
# Time Resolution for Cross Correlation is same as that of each of the Lightcurves
cs.dt
cs.plot( ['Time Lags (seconds)','Correlation'])
cs.time_shift #seconds
from stingray.crosscorrelation import AutoCorrelation
lc = lc1
ac = AutoCorrelation(lc)
ac.n
ac.corr[:10]
ac.time_lags
ac.time_shift
ac.plot()
dt = 0.001 # seconds
exposure = 20. # seconds
freq = 1 # Hz
times = np.arange(0, exposure, dt) # seconds
signal_1 = 300 * np.sin(2.*np.pi*freq*times) + 1000 # counts/s
noisy_1 = np.random.poisson(signal_1*dt) # counts
lc = Lightcurve(times, noisy_1)
ac = AutoCorrelation(lc, mode = 'full')
ac.corr
ac.time_lags
ac.time_shift
ac.plot()
<END_TASK>
|